text_prompt (string, 168-30.3k chars) | code_prompt (string, 67-124k chars)
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parameters
Step2: Observe and Estimate Using Max-Estimator
Step3: Show Results for Additional Estimators
|
<ASSISTANT_TASK:>
Python Code:
# importing
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
# showing figures inline
%matplotlib inline
# plotting options
font = {'size' : 30}
plt.rc('font', **font)
plt.rc('text', usetex=True)
matplotlib.rc('figure', figsize=(30, 15) )
# define (unknown) group size
N = 1000#np.random.randint( 1000 )
taxis = [ t for t in range( N ) ]
# size of subgroup to be observed
M = N // 10
# number of observations
N_obser = int( 1e3 )
# allow repeated observations, i.e. sampling with replacement (same number may be drawn twice)?
MO = 0
# Sample distances used in order statistics for computing quantiles
Q = 10
# initialize array for collecting several estimations to evaluate bias of estimator
estimators = np.zeros( N_obser )
for _k in range( N_obser ):
sample = np.random.choice( taxis, size=M, replace=MO ) # observe a subgroup of size M
estimators[ _k ] = np.max( sample )
# get sample average and compare to true value
print('True value: {}'.format( N ) )
print('Average estimate: {}'.format( np.average( estimators ) ) )
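# Aside (a minimal sketch, not part of the original analysis; assumes sampling
# without replacement): the sample maximum is biased low. The classical
# "German tank" estimator for a discrete uniform {1,...,N} is m*(k+1)/k - 1
# with sample maximum m and sample size k; since taxis runs from 0 to N-1,
# the maximum is shifted by one first.
corrected = ( estimators + 1.0 ) * ( M + 1.0 ) / M - 1.0
print('Bias-corrected average estimate: {}'.format( np.average( corrected ) ))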
# define range of group size to be analyzed
group_sizes = range( 1, N//10, 5 )
# initialize arrays for collecting estimator values
est_max = np.zeros( len( group_sizes ) ) # use float arrays; np.zeros_like( range ) gives integer dtype and would truncate the averages
est_maxmin = np.zeros( len( group_sizes ) )
#est_quantiles = np.zeros( len( group_sizes ) )
est_avg = np.zeros( len( group_sizes ) )
# loop for group sizes
for ind_gs, val_gs in enumerate( group_sizes ):
# initialize array for collecting several estimations to evaluate bias of estimator
estimators_max = np.zeros( N_obser )
estimators_maxmin = np.zeros( N_obser )
estimators_avg = np.zeros( N_obser )
#estimators_quantiles = np.zeros( N_obser )
# loop for realizations
for _k in range( N_obser ):
# sample and get estimator
sample = np.random.choice( taxis, size = val_gs, replace = MO )
estimators_max[ _k ] = np.max( sample )
estimators_maxmin[ _k ] = np.max( sample ) + np.min( sample )
#estimators_quantiles[ _k ] = np.sort( sample )[ -Q ] + np.sort( sample )[ Q ]
estimators_avg[ _k ] = 2 * np.average( sample )
# find average value of estimation for given group size
est_max[ ind_gs ] = np.average( estimators_max )
est_maxmin[ ind_gs ] = np.average( estimators_maxmin )
est_avg[ ind_gs ] = np.average( estimators_avg )
#est_quantiles[ ind_gs ] = np.average( estimators_quantiles )
plt.figure()
plt.plot( group_sizes, est_max, label='$\hat{E}( \\max(X_1,\ldots,X_K) )$' )
plt.plot( group_sizes, est_maxmin, label='$\hat{E}( \\max(X_1,\ldots,X_K) + \\min(X_1,\ldots,X_K) )$' )
plt.plot( group_sizes, est_avg, label='$\hat{E}( \\frac{2}{K}\sum_{i=1}^K X_i)$' )
#plt.plot( group_sizes, est_quantiles, label='$\hat{E}( X_{(Q)} + X_{(K-Q)} )$' )
plt.grid(True)
plt.legend( loc='best' )
plt.title('N = {}'.format(N) )
plt.xlabel('$K$')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we shall compute the word counts on the lists.
Step2: Let's print some useful descriptive data
Step3: We want to compute the list of common words that are introduced by different people in different lists.
Step4: Let's identify "influential words" (see definition below) and print them
|
<ASSISTANT_TASK:>
Python Code:
from bigbang.archive import Archive
from bigbang.archive import load as load_archive
import bigbang.parse as parse
import bigbang.graph as graph
import bigbang.mailman as mailman
import bigbang.process as process
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
from pprint import pprint as pp
import pytz
import numpy as np
import math
import nltk
from itertools import repeat
from nltk.stem.lancaster import LancasterStemmer
st = LancasterStemmer()
from nltk.corpus import stopwords
import re
import os
#insert TWO names of mailing lists (no more, no less)
cwd = os.getcwd()
archives_names = ["ietf-privacy", "architecture-discuss"]
archives_paths = list()
for archive_name in archives_names:
archives_paths.append('../../archives/'+archive_name+'.csv')
archives_list = [load_archive(archive_path).data for archive_path in archives_paths]
archives = Archive(pd.concat(archives_list))
archives_data = archives.data
#to stem or not to stem?
#if stem is set to True, then words are converted into their stem(no plurals, no suffixes, etc.)
#if stem is set to False, then words are processed for their literal spelling
stem = False
#Compute word count on the first list
wordcount1={}
for row in archives_list[0].iterrows():
if row[1]["Body"] is not None:
w = row[1]["Body"].replace("'", "")
k = re.sub(r'[^\w]', ' ', w)
t = nltk.tokenize.word_tokenize(k)
for g in t:
try:
if stem: word = st.stem(g)
else: word = g
except:
print(g)
continue # skip this token; falling through would reuse the previous word
if word in stopwords.words('english'):
continue
if word not in wordcount1:
wordcount1[word] = [1]
wordcount1[word].append(row[0])
wordcount1[word].append(row[1]["Date"])
wordcount1[word].append(row[1]["From"])
wordcount1[word].append(row[1]["In-Reply-To"])
else:
wordcount1[word][0] += 1
wd = wordcount1 # keep a backup copy, just in case
#Compute word count on the second list
wordcount2={}
for row in archives_list[1].iterrows():
if row[1]["Body"] is not None:
w = row[1]["Body"].replace("'", "")
k = re.sub(r'[^\w]', ' ', w)
t = nltk.tokenize.word_tokenize(k)
for g in t:
try:
if stem: word = st.stem(g)
else: word = g
except:
continue # skip this token; falling through would reuse the previous word
if word in stopwords.words('english'):
continue
if word not in wordcount2:
wordcount2[word] = [1]
wordcount2[word].append(row[0])
wordcount2[word].append(row[1]["Date"])
wordcount2[word].append(row[1]["From"])
wordcount2[word].append(row[1]["In-Reply-To"])
else:
wordcount2[word][0] += 1
#Create and export a wordcount information dataframe per mailing list
#set the variable 'path' as a valid directory path where to store the files
asd = pd.DataFrame(wordcount1)
new_dataframe = asd.transpose()
new_dataframe.columns = ["Wordcount", "Message-ID", "Date","From","In-Reply-To"]
new_dataframe.to_csv(cwd+'/wordcount_info_'+archives_names[0]+'.csv')
asd1 = pd.DataFrame(wordcount2)
new_dataframe1 = asd1.transpose()
new_dataframe1.columns = ["Wordcount", "Message-ID", "Date","From","In-Reply-To"]
new_dataframe1.to_csv(cwd+'/wordcount_info_'+archives_names[1]+'.csv')
print('File exported!')
print('Check '+cwd+'/wordcount_info_'+archives_names[0]+'.csv and '+cwd+'/wordcount_info_'+archives_names[1]+'.csv')
print('Number of unique words in mailing list '+archives_names[0])
print(len(wordcount1))
print('Number of unique words in mailing list '+archives_names[1])
print(len(wordcount2))
samewordcount=0
for word in wordcount1:
if word in wordcount2:
samewordcount += 1
print('Number of unique words shared by the two mailing lists')
print(samewordcount)
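# Equivalent, more idiomatic check (illustrative): the shared vocabulary is
# just the intersection of the two dictionaries' key sets.
print(len(set(wordcount1) & set(wordcount2)))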
samewords = {}
for word in wordcount1:
if word in wordcount2:
if wordcount1[word][3] == wordcount2[word][3]:
samewords[word] = [wordcount1[word][0],wordcount1[word][3],wordcount1[word][2],
wordcount2[word][0],wordcount2[word][3],wordcount2[word][2]]
print('Total number of shared words that were introduced by the same person in both lists')
print(len(samewords.keys()))
#build dataframe of information of those words introduced by same people
#and export to file
df1 = pd.DataFrame(samewords)
samewords_sameauthor_dataframe = df1.transpose()
samewords_sameauthor_dataframe.columns = ["Wordcount1", "From1", "Date1","Wordcount2", "From2", "Date2"]
samewords_sameauthor_dataframe.to_csv(cwd+'/samewords_sameauthor.csv')
print('File exported!')
print('Check '+cwd+'/samewords_sameauthor.csv')
samewordcount = 0
for word in wordcount1:
if wordcount1[word][0] >= 100 and wordcount1[word][0] <= 500:
if word in wordcount2:
if wordcount2[word][0] >= 100 and wordcount2[word][0] <= 500:
samewordcount += 1
print('Among words appearing 100-500 times, the number of words common to both mailing lists')
print(samewordcount)
same_person_count = 0
for word in wordcount1:
if wordcount1[word][0] >= 100 and wordcount1[word][0] <= 500:
if word in wordcount2:
if wordcount2[word][0] >= 100 and wordcount2[word][0] <= 500:
if wordcount1[word][3] == wordcount2[word][3]:
#print word
same_person_count += 1
print('Among words appearing 100-500 times, the number of common words first introduced by the same person in both lists')
print(same_person_count)
#compute common word list(introduced by different people in different lists)
#and print the number
commonwords = {}
for word in wordcount1:
if wordcount1[word][0] >= 100 and wordcount1[word][0] <= 500:
if word in wordcount2:
if wordcount2[word][0] >= 100 and wordcount2[word][0] <= 500:
if wordcount1[word][3] != wordcount2[word][3]:
commonwords[word] = [wordcount1[word][0],wordcount1[word][3],wordcount1[word][2],
wordcount2[word][0],wordcount2[word][3],wordcount2[word][2]]
print('Number of common words introduced by different people in the two lists')
print(len(commonwords))
#build dataframe of information of those words introduced by different people
#and export to file
df1 = pd.DataFrame(commonwords)
commonword_differentauthor_dataframe = df1.transpose()
commonword_differentauthor_dataframe.columns = ["Wordcount1", "From1", "Date1","Wordcount2", "From2", "Date2"]
commonword_differentauthor_dataframe.to_csv(cwd+'/commonwords_differentauthor.csv')
print('File exported!')
print('Check '+cwd+'/commonwords_differentauthor.csv')
#Compute 'influential words', i.e. words that may indicate a flow of ideas between lists.
#Definition: word A is first introduced by person p in list1; q sees it and then
#introduces A to list2 (or vice versa). "q saw it" means q posted something in
#list1 before p first used the word. Collect the full list of such words A.
#Build a dictionary with senders and date of first participation for each mailing list
first_participation1 = {}
for row in archives_list[0].iterrows():
if row[1]["From"] not in first_participation1:
first_participation1[row[1]["From"]] = row[1]["Date"]
first_participation2 = {}
for row in archives_list[1].iterrows():
if row[1]["From"] not in first_participation2:
first_participation2[row[1]["From"]] = row[1]["Date"]
time_influence = 0
influence_list = {}
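# Layout reminder: commonwords[word] = [count1, from1, date1, count2, from2, date2],
# so index 1/4 is the introducing author and index 2/5 the first-use date per list.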
for word in commonwords:
if commonwords[word][2] > commonwords[word][5]: #Author2 comes first
if commonwords[word][1] in first_participation2: #Check if author1 in list2
if first_participation2[commonwords[word][1]] < commonwords[word][5]: #Check if author1\
#in list2 and exists before the word first introduced in list2
influence_list[word] = commonwords[word]
time_influence += 1
else: #Author1 comes first
if commonwords[word][4] in first_participation1:
if first_participation1[commonwords[word][4]] < commonwords[word][2]:
influence_list[word] = commonwords[word]
time_influence += 1
#print the list of influential words (exclude numbers)
if len(influence_list.keys()) == 0: print('No influential words detected')
for word, info in influence_list.items():
if not word.isdigit():
print('"'+word+'"')
print(info)
print(' ')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Spector dataset is distributed with statsmodels. You can access a vector of values for the dependent variable (endog) and a matrix of regressors (exog) like this
Step2: Then, we add a constant to the matrix of regressors
Step3: To create your own Likelihood Model, you simply need to overwrite the loglike method.
Step4: Estimate the model and print a summary
Step5: Compare your Probit implementation to statsmodels' "canned" implementation
Step6: Notice that the GenericLikelihoodModel class provides numerical differentiation, so we did not have to provide Hessian or Score functions in order to calculate the covariance estimates.
Step7: New Model Class
Step8: Two important things to notice
Step9: The model we are interested in has a vector of non-negative integers as
Step10: Then, we fit the model and extract some information
Step11: Extract parameter estimates, standard errors, p-values, AIC, etc.
Step12: As usual, you can obtain a full list of available information by typing
Step13: Testing
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
data = sm.datasets.spector.load_pandas()
exog = data.exog
endog = data.endog
print(sm.datasets.spector.NOTE)
print(data.exog.head())
exog = sm.add_constant(exog, prepend=True)
class MyProbit(GenericLikelihoodModel):
def loglike(self, params):
exog = self.exog
endog = self.endog
q = 2 * endog - 1
return stats.norm.logcdf(q*np.dot(exog, params)).sum()
sm_probit_manual = MyProbit(endog, exog).fit()
print(sm_probit_manual.summary())
sm_probit_canned = sm.Probit(endog, exog).fit()
print(sm_probit_canned.params)
print(sm_probit_manual.params)
print(sm_probit_canned.cov_params())
print(sm_probit_manual.cov_params())
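# A quick numerical check (illustrative, not in the original) that the manual
# and canned implementations agree to optimizer tolerance:
print(np.allclose(sm_probit_manual.params, sm_probit_canned.params, atol=1e-4))
print(np.allclose(sm_probit_manual.cov_params(), sm_probit_canned.cov_params(), atol=1e-4))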
import numpy as np
from scipy.stats import nbinom
def _ll_nb2(y, X, beta, alph):
mu = np.exp(np.dot(X, beta))
size = 1/alph
prob = size/(size+mu)
ll = nbinom.logpmf(y, size, prob)
return ll
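# Sanity check (illustrative): with size = 1/alpha and prob = size/(size + mu),
# scipy's nbinom has mean mu and variance mu + alpha*mu**2, i.e. the NB2 model.
_mu, _alph = 3.0, 0.5
_size = 1 / _alph
_prob = _size / (_size + _mu)
assert np.isclose(nbinom.mean(_size, _prob), _mu)
assert np.isclose(nbinom.var(_size, _prob), _mu + _alph * _mu ** 2)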
from statsmodels.base.model import GenericLikelihoodModel
class NBin(GenericLikelihoodModel):
def __init__(self, endog, exog, **kwds):
super(NBin, self).__init__(endog, exog, **kwds)
def nloglikeobs(self, params):
alph = params[-1]
beta = params[:-1]
ll = _ll_nb2(self.endog, self.exog, beta, alph)
return -ll
def fit(self, start_params=None, maxiter=10000, maxfun=5000, **kwds):
# we have one additional parameter and we need to add it for summary
if 'alpha' not in self.exog_names: # guard against appending again if fit() is re-run
self.exog_names.append('alpha')
if start_params is None:
# Reasonable starting values
start_params = np.append(np.zeros(self.exog.shape[1]), .5)
# intercept
start_params[-2] = np.log(self.endog.mean())
return super(NBin, self).fit(start_params=start_params,
maxiter=maxiter, maxfun=maxfun,
**kwds)
import statsmodels.api as sm
medpar = sm.datasets.get_rdataset("medpar", "COUNT", cache=True).data
medpar.head()
y = medpar.los
X = medpar[["type2", "type3", "hmo", "white"]].copy()
X["constant"] = 1
mod = NBin(y, X)
res = mod.fit()
print('Parameters: ', res.params)
print('Standard errors: ', res.bse)
print('P-values: ', res.pvalues)
print('AIC: ', res.aic)
print(res.summary())
res_nbin = sm.NegativeBinomial(y, X).fit(disp=0)
print(res_nbin.summary())
print(res_nbin.params)
print(res_nbin.bse)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Wavelength Response
Step2: Temperature Response
Step3: The main question is
Step4: Boerner et al. (2012) use the coronal abundances of Feldman and Widing (1993) and the ionization balances of Dere et al. (2009). We also want to make sure we are calculating all of the emissivities in units of photons rather than ergs as this makes it easier when multiplying by the instrument response function.
Step5: Now iterate through the ion list, calculating the emission and subsequently the contribution function at each stage and then interpolating the wavelength response function to the appropriate wavelengths.
|
<ASSISTANT_TASK:>
Python Code:
import json
import numpy as np
import h5py
import seaborn as sns
from scipy.interpolate import splev,splrep
import matplotlib.pyplot as plt
import astropy.units as u
from sunpy.instr import aia
import ChiantiPy.core as ch
import ChiantiPy.tools.data as ch_data
%matplotlib inline
response = aia.Response(ssw_path='/Users/willbarnes/Documents/Rice/Research/ssw/',
#channel_list=[131,171,193,211,304]
)
response.calculate_wavelength_response(include_crosstalk=True)
response.peek_wavelength_response()
data = np.loadtxt('../aia_sample_data/aia_wresponse_raw.dat')
channels = sorted(list(response.wavelength_response.keys()))
ssw_results = {}
for i in range(len(channels)):
ssw_results[channels[i]] = {'wavelength':data[:,0],
'response':data[:,i+1]}
fig,axes = plt.subplots(3,3,figsize=(12,12))
for c,ax in zip(channels,axes.flatten()):
#ssw
ax.plot(ssw_results[c]['wavelength'],ssw_results[c]['response'],
#color=response.channel_colors[c],
label='ssw')
#sunpy
ax.plot(response.wavelength_response[c]['wavelength'],response.wavelength_response[c]['response'],
#color=response.channel_colors[c],
#marker='.',ms=6,markevery=5,
label='SunPy',linestyle=':',alpha=0.95,lw=2)
if c!=335 and c!=304:
ax.set_xlim([c-20,c+20])
#if c==335:
# ax.set_xlim([120,140])
#if c==304:
# ax.set_xlim([80,100])
ax.set_title('{} $\mathrm{{\mathring{{A}}}}$'.format(c),fontsize=20)
ax.set_xlabel(r'$\lambda$ ({0:latex})'.format(response.wavelength_response[c]['wavelength'].unit),fontsize=20)
ax.set_ylabel(r'$R_i(\lambda)$ ({0:latex})'.format(response.wavelength_response[c]['response'].unit),fontsize=20)
# contamination plots
#304
#ssw
ax = axes.flatten()[-2]
ax.plot(ssw_results[304]['wavelength'],ssw_results[304]['response'],
#color=response.channel_colors[c],
label='ssw')
#sunpy
ax.plot(response.wavelength_response[304]['wavelength'],response.wavelength_response[304]['response'],
label='SunPy',linestyle='',alpha=0.95,lw=2,
#color=response.channel_colors[c],
marker='.',ms=8,markevery=2,
)
ax.set_xlim([80,100])
ax.set_title('{} $\mathrm{{\mathring{{A}}}}$ contamination from 94'.format(304),fontsize=14)
ax.set_xlabel(r'$\lambda$ ({0:latex})'.format(response.wavelength_response[c]['wavelength'].unit),fontsize=20)
ax.set_ylabel(r'$R_i(\lambda)$ ({0:latex})'.format(response.wavelength_response[c]['response'].unit),fontsize=20)
#335
ax = axes.flatten()[-1]
ax.plot(ssw_results[335]['wavelength'],ssw_results[335]['response'],
#color=response.channel_colors[c],
label='ssw')
#sunpy
ax.plot(response.wavelength_response[335]['wavelength'],response.wavelength_response[335]['response'],
label='SunPy',linestyle='',alpha=0.95,lw=2,
#color=response.channel_colors[c],
marker='.',ms=8,markevery=2,
)
ax.set_xlim([120,140])
ax.set_title('{} $\mathrm{{\mathring{{A}}}}$ contamination from 131'.format(335),fontsize=14)
ax.set_xlabel(r'$\lambda$ ({0:latex})'.format(response.wavelength_response[c]['wavelength'].unit),fontsize=20)
ax.set_ylabel(r'$R_i(\lambda)$ ({0:latex})'.format(response.wavelength_response[c]['response'].unit),fontsize=20)
axes[0,0].legend(loc='best')
plt.tight_layout()
fig,axes = plt.subplots(3,3,figsize=(12,12),sharey=True,sharex=True)
for c,ax in zip(channels,axes.flatten()):
#ssw
ax2 = ax.twinx()
ssw_interp = ssw_results[c]['response']*response.wavelength_response[c]['response'].unit
delta_response = np.fabs(response.wavelength_response[c]['response'] - ssw_interp)/(ssw_interp)
ax.plot(response.wavelength_response[c]['wavelength'],delta_response,
#color=response.channel_colors[c]
)
ax2.plot(response.wavelength_response[c]['wavelength'],response.wavelength_response[c]['response'],
color='k',linestyle='--')
ax.set_title('{} $\mathrm{{\mathring{{A}}}}$'.format(c),fontsize=20)
ax.set_xlabel(r'$\lambda$ ({0:latex})'.format(response.wavelength_response[c]['wavelength'].unit),fontsize=20)
ax.set_ylabel(r'$\frac{|\mathrm{SSW}-\mathrm{SunPy}|}{\mathrm{SSW}}$',fontsize=20)
ax2.set_ylabel(r'$R_i(\lambda)$ ({0:latex})'.format(response.wavelength_response[c]['response'].unit))
ax.set_ylim([-1.1,1.1])
plt.tight_layout()
info_table = aia.response.aia_instr_properties_to_table([94,131,171,193,211,335],
['/Users/willbarnes/Documents/Rice/Research/ssw/sdo/aia/response/aia_V6_all_fullinst.genx'])
temperature = np.logspace(5,8,50)*u.K
pressure = 1e15*u.K*u.cm**(-3)
density = pressure/temperature
ion_list = (['fe_{}'.format(i) for i in np.arange(6,26)]
+ ['ca_{}'.format(i) for i in np.arange(10,20)])
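# Note: the loops below iterate over the full ch_data.MasterList rather than
# this ion_list; ion_list is kept from the original notebook but is unused here.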
ch_data.Defaults['flux'] = 'photon'
ch_data.Defaults['abundfile'] = 'sun_coronal_1992_feldman'
ch_data.Defaults['ioneqfile'] = 'chianti'
temperature_responses = {k:np.zeros(len(temperature)) for k in response.wavelength_response}
for ion in ch_data.MasterList:
#if ion.split('_')[0] != 'fe':
# continue
print('{}: Calculating contribution function for {}'.format(ch_data.MasterList.index(ion),ion))
#declare ion object
tmp = ch.ion(ion,temperature=temperature.value,eDensity=density.value,
abundance='sun_coronal_1992_feldman')
#calculate emissivity
tmp.emiss()
em = tmp.Emiss['emiss'][np.argsort(tmp.Emiss['wvl']),:]
wvl = np.sort(tmp.Emiss['wvl'])
#calculate contribution function.
gofnt = tmp.Abundance*em*tmp.IoneqOne/tmp.EDensity
#iterate over channels
for channel in response.wavelength_response:
#print('Adding to channel {}'.format(channel))
#interpolate response function to transitions
rsp = splev(wvl,splrep(response.wavelength_response[channel]['wavelength'].value,
response.wavelength_response[channel]['response'].value))
rsp = np.where(rsp<0,0,rsp)*response._channel_info[channel]['plate_scale'].value
#weighted sum over wavelength
#add to temperature response
temperature_responses[channel] += np.dot(rsp,gofnt)
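# In effect the loop above computes, per channel c, the discrete sum
#   K_c(T) = sum_over_lines G(lambda, T) * R_c(lambda),
# where G = abundance * emissivity * ionization_fraction / n_e is the
# contribution function assembled a few lines up.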
# ssw responses
precalculated_responses_data = np.loadtxt('../aia_sample_data/aia_tresponse_raw.dat')
precalc_channels = [94,131,171,193,211,304,335]
precalculated_responses = {c: precalculated_responses_data[:,i+1] for i,c in enumerate(precalc_channels)}
precalculated_responses['temperature'] = precalculated_responses_data[:,0]
# ssw responses with chiantifix and evenorm fix
precalculated_responses_data = np.loadtxt('../aia_sample_data/aia_tresponse_fix.dat')
precalculated_responses_fix = {c: precalculated_responses_data[:,i+1] for i,c in enumerate(precalc_channels)}
precalculated_responses_fix['temperature'] = precalculated_responses_data[:,0]
channel_colors = {c: sns.color_palette('Set2',7)[i] for i,c in enumerate(response.wavelength_response)}
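# Note on cell order: the plotting cell below uses continuum_contributions,
# which is computed in the continuum cell further down; run that cell first
# when executing this notebook top to bottom.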
fig,axes = plt.subplots(4,2,figsize=(15,30),sharex=True)
for channel,ax in zip(sorted(list(temperature_responses.keys())),axes.flatten()):
ax.plot(temperature,temperature_responses[channel]/(0.83*(1./4./np.pi)),
label=r'lines',
color=channel_colors[channel])
ax.plot(temperature,(temperature_responses[channel]/(0.83*(1./4./np.pi))
+ continuum_contributions[channel]),
linestyle=':',
label='lines + continuum',
color=channel_colors[channel])
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_ylim([1e-30,2e-24])
ax.set_xlim([1e5,1e8])
ax.set_title(r'{} $\AA$'.format(channel))
for i,pc in enumerate(precalc_channels):
axes.flatten()[i].plot(10**precalculated_responses['temperature'],
precalculated_responses[pc],
linestyle='--',
label=r'SSW',
color=channel_colors[pc])
axes.flatten()[i].plot(10**precalculated_responses_fix['temperature'],
precalculated_responses_fix[pc],
linestyle='-.',
label=r'SSW with chiantifix',
color=channel_colors[pc])
axes[0,1].legend(loc='best')
continuum_contributions = {k:np.zeros(len(temperature)) for k in response.wavelength_response}
wvl = response.wavelength_response[94]['wavelength'].value
for ion in ch_data.MasterList:
#if ion.split('_')[0] != 'fe':
# continue
print('{}: Calculating contribution function for {}'.format(ch_data.MasterList.index(ion),ion))
tmp = ch.ion(ion,temperature=temperature.value,eDensity=density.value,abundance='sun_coronal_1992_feldman')
#two photon emiss
tmp.twoPhoton(wvl)
if 'rate' in tmp.TwoPhoton:
two_photon = tmp.TwoPhoton['rate']
else:
two_photon = tmp.TwoPhoton['emiss']
#free-free
tmp_cont = ch.continuum(ion,temperature.value,abundance='sun_coronal_1992_feldman')
if tmp_cont.Ion > 1:
tmp_cont.freeFree(wvl)
if 'rate' in tmp_cont.FreeFree:
free_free = tmp_cont.FreeFree['rate']
else:
free_free = np.zeros((len(temperature),len(wvl)))
else:
free_free = np.zeros((len(temperature),len(wvl)))
#free-bound
if tmp_cont.Ion > 1:
tmp_cont.freeBound(wvl)
if 'rate' in tmp_cont.FreeBound:
free_bound = tmp_cont.FreeBound['rate']
else:
free_bound = np.zeros((len(temperature),len(wvl)))
else:
free_bound = np.zeros((len(temperature),len(wvl)))
#add to channels
for channel in response.wavelength_response:
continuum_contributions[channel] += np.dot((two_photon + free_free + free_bound),
(response.wavelength_response[channel]['response'].value
*response._channel_info[channel]['plate_scale'].value))
plt.figure(figsize=(8,8))
for channel in continuum_contributions:
plt.plot(temperature,
continuum_contributions[channel],
label=channel,color=channel_colors[channel])
plt.xscale('log')
plt.yscale('log')
plt.xlim([1e5,1e8])
plt.ylim([1e-32,1e-27])
plt.legend(loc='best')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get the Trace
Step2: FTrace Object
Step3: Assertions
Step4: Assertion
Step5: Assertion
Step6: Statistics
Step7: Check if the mean temperature is less than CONTROL_TEMP
Step8: We can also use getStatement to get the absolute values. Here we are getting the standard deviation expressed as a percentage of the mean
Step9: Thermal Residency
|
<ASSISTANT_TASK:>
Python Code:
import trappy
import numpy
config = {}
# TRAPpy Events
config["THERMAL"] = trappy.thermal.Thermal
config["OUT"] = trappy.cpu_power.CpuOutPower
config["IN"] = trappy.cpu_power.CpuInPower
config["PID"] = trappy.pid_controller.PIDController
config["GOVERNOR"] = trappy.thermal.ThermalGovernor
# Control Temperature
config["CONTROL_TEMP"] = 77000
# A temperature margin of 2.5 degrees Celsius
config["TEMP_MARGIN"] = 2500
# The Sustainable power at the control Temperature
config["SUSTAINABLE_POWER"] = 2500
# Expected percentile of CONTROL_TEMP + TEMP_MARGIN
config["EXPECTED_TEMP_QRT"] = 95
# Maximum expected Standard Deviation as a percentage
# of mean temperature
config["EXPECTED_STD_PCT"] = 5
import urllib.request
import os
TRACE_DIR = "example_trace_dat_thermal"
TRACE_FILE = os.path.join(TRACE_DIR, 'bart_thermal_trace.dat')
TRACE_URL = 'http://cdn.rawgit.com/sinkap/4e0a69cbff732b57e36f/raw/7dd0ed74bfc17a34a3bd5ea6b9eb3a75a42ddbae/bart_thermal_trace.dat'
if not os.path.isdir(TRACE_DIR):
os.mkdir(TRACE_DIR)
if not os.path.isfile(TRACE_FILE):
print("Fetching trace file..")
urllib.request.urlretrieve(TRACE_URL, filename=TRACE_FILE)
# Create a Trace object
ftrace = trappy.FTrace(TRACE_FILE, "SomeBenchMark")
# Create an Assertion Object
from bart.common.Analyzer import Analyzer
t = Analyzer(ftrace, config)
BIG = '000000f0'
LITTLE = '0000000f'
result = t.getStatement("((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \
& (IN:dynamic_power > 0)",reference=True, select=BIG)
if len(result):
print("FAIL: Dynamic Power is NOT Zero when load is Zero for the BIG cluster")
else:
print("PASS: Dynamic Power is Zero when load is Zero for the BIG cluster")
result = t.getStatement("((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \
& (IN:dynamic_power > 0)",reference=True, select=LITTLE)
if len(result):
print("FAIL: Dynamic Power is NOT Zero when load is Zero for the LITTLE cluster")
else:
print("PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster")
result = t.getStatement("(GOVERNOR:current_temperature > CONTROL_TEMP) &\
(PID:output > SUSTAINABLE_POWER)", reference=True, select=0)
if len(result):
print("FAIL: The Governor is allocating power > sustainable when T > CONTROL_TEMP")
else:
print("PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP")
t.assertStatement("numpy.percentile(THERMAL:temp, 95) < (CONTROL_TEMP + TEMP_MARGIN)")
t.assertStatement("numpy.mean(THERMAL:temp) <= CONTROL_TEMP", select=0)
t.getStatement("(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp)", select=0)
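# The thresholds above can also reference the constants in the config dict
# passed to the Analyzer, just as CONTROL_TEMP is used earlier (illustrative
# restatement of the same checks; assumes config keys resolve inside function
# arguments the same way they do in comparisons):
t.assertStatement("numpy.percentile(THERMAL:temp, EXPECTED_TEMP_QRT) < (CONTROL_TEMP + TEMP_MARGIN)")
t.assertStatement("(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp) < EXPECTED_STD_PCT", select=0)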
from bart.thermal.ThermalAssert import ThermalAssert
t_assert = ThermalAssert(ftrace)
end = ftrace.get_duration()
LOW = 0
HIGH = 78000
# The thermal residency gives the percentage (or absolute time) spent in the
# specified temperature range.
result = t_assert.getThermalResidency(temp_range=(LOW, HIGH),
window=(0, end),
percent=True)
for tz_id in result:
print "Thermal Zone: {} spends {:.2f}% time in the temperature range [{}, {}]".format(tz_id,
result[tz_id],
LOW/1000,
HIGH/1000)
pct_temp = numpy.percentile(t.getStatement("THERMAL:temp")[tz_id], result[tz_id])
print "The {:.2f}th percentile temperature is {:.2f}".format(result[tz_id], pct_temp / 1000.0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Plot the raw PSD
Step3: Plot a cleaned PSD
Step4: Alternative functions for PSDs
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io, read_proj, read_selection
from mne.datasets import sample
from mne.time_frequency import psd_multitaper
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_eog-proj.fif'
tmin, tmax = 0, 60 # use the first 60s of data
# Setup for reading the raw data (to save memory, crop before loading)
raw = io.read_raw_fif(raw_fname).crop(tmin, tmax).load_data()
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Add SSP projection vectors to reduce EOG and ECG artifacts
projs = read_proj(proj_fname)
raw.add_proj(projs, remove_existing=True)
fmin, fmax = 2, 300 # look at frequencies between 2 and 300Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
raw.plot_psd(area_mode='range', tmax=10.0, show=False, average=True)
# Pick MEG magnetometers in the Left-temporal region
selection = read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
stim=False, exclude='bads', selection=selection)
# Let's just look at the first few channels for demonstration purposes
picks = picks[:4]
plt.figure()
ax = plt.axes()
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=False, ax=ax, color=(0, 0, 1), picks=picks,
show=False, average=True)
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(0, 1, 0), picks=picks,
show=False, average=True)
# And now do the same with SSP + notch filtering
# Pick all channels for notch since the SSP projection mixes channels together
raw.notch_filter(np.arange(60, 241, 60), n_jobs=1, fir_design='firwin')
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(1, 0, 0), picks=picks,
show=False, average=True)
ax.set_title('Four left-temporal magnetometers')
plt.legend(ax.lines[::3], ['Without SSP', 'With SSP', 'SSP + Notch'])
f, ax = plt.subplots()
psds, freqs = psd_multitaper(raw, low_bias=True, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, proj=True, picks=picks,
n_jobs=1)
psds = 10 * np.log10(psds)
psds_mean = psds.mean(0)
psds_std = psds.std(0)
ax.plot(freqs, psds_mean, color='k')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
color='k', alpha=.5)
ax.set(title='Multitaper PSD', xlabel='Frequency (Hz)',
ylabel='Power Spectral Density (dB)')
plt.show()
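# Welch's method is another common PSD estimator; a minimal sketch assuming the
# same MNE era as above (mne.time_frequency.psd_welch, with a matching signature):
from mne.time_frequency import psd_welch
psds_w, freqs_w = psd_welch(raw, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                            proj=True, picks=picks, n_jobs=1)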
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Eight schools example
Step4: Optimization in Stan
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import pystan
import warnings
warnings.filterwarnings("ignore")
schools_code = """
data {
int<lower=0> J; // number of schools
real y[J]; // estimated treatment effects
real<lower=0> sigma[J]; // s.e. of effect estimates
}
parameters {
real mu;
real<lower=0> tau;
real eta[J];
}
transformed parameters {
real theta[J];
for (j in 1:J)
theta[j] <- mu + tau * eta[j];
}
model {
eta ~ normal(0, 1);
y ~ normal(theta, sigma);
}
"""
schools_dat = {'J': 8,
'y': [28, 8, -3, 7, -1, 1, 18, 12],
'sigma': [15, 10, 16, 11, 9, 11, 10, 18]}
fit = pystan.stan(model_code=schools_code, data=schools_dat,
iter=1000, chains=4)
fit
la = fit.extract(permuted=True)
la.keys()
for k, v in la.items():
print(k, v.shape)
fit.plot()
plt.gcf().set_size_inches(18.5, 10.5);
ocode = """
data {
int<lower=1> N;
real y[N];
}
parameters {
real mu;
}
model {
y ~ normal(mu, 1);
}
"""
sm = pystan.StanModel(model_code=ocode)
import numpy as np
y2 = np.random.normal(size=20)
np.mean(y2)
op = sm.optimizing(data=dict(y=y2, N=len(y2)))
op
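# For this model the posterior mode of mu is the sample mean, so the optimizer's
# point estimate should match np.mean(y2) (illustrative check):
print(op['mu'], np.mean(y2))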
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import section specific modules
Step3: 2.8. The Discrete Fourier Transform (DFT) and the Fast Fourier Transform (FFT)<a id='math
Step5: Although this would produce the correct result, this way of implementing the DFT is going to be incredibly slow. The DFT can be implemented in matrix form. Convince yourself that a vectorised implementation of this operation can be achieved with
Step6: This function will be much faster than the previous implementation. We should check that they both return the same result
Step7: Just to be sure our DFT really works, let's also compare the output of our function to numpy's built in DFT function (note numpy automatically implements a faster version of the DFT called the FFT, see the discussion below)
Step8: Great! Our function is returning the correct result. Next we do an example to demonstrate the duality between the spectral (frequency domain) and temporal (time domain) representations of a function. As the following example shows, the Fourier transform of a time series returns the frequencies contained in the signal.
Step9: Figure 2.8.1
Step10: Figure 2.8.2
Step11: That is almost a factor of ten difference. Let's compare this to numpy's built-in FFT
Step13: That seems amazing! The numpy FFT is about 1000 times faster than our vectorised implementation. But how does numpy achieve this speed up? Well, by using the fast Fourier transform of course.
Step14: Let's confirm that this function returns the correct result by comparing with numpy's FFT.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
from IPython.display import HTML
from ipywidgets import interact
HTML('../style/code_toggle.html')
def loop_DFT(x):
"""
Implementing the DFT in a double loop
Input: x = the vector we want to find the DFT of
"""
#Get the length of the vector (will only work for 1D arrays)
N = x.size
#Create vector to store result in
X = np.zeros(N,dtype=complex)
for k in range(N):
for n in range(N):
X[k] += np.exp(-1j*2.0*np.pi*k*n/N)*x[n]
return X
def matrix_DFT(x):
"""
Implementing the DFT in vectorised form
Input: x = the vector we want to find the DFT of
"""
#Get the length of the vector (will only work for 1D arrays)
N = x.size
#Create vector to store result in
n = np.arange(N)
k = n.reshape((N,1))
K = np.exp(-1j*2.0*np.pi*k*n/N)
return K.dot(x)
x = np.random.random(256) #create random vector to take the DFT of
np.allclose(loop_DFT(x),matrix_DFT(x)) #compare the result using numpy's built in function
x = np.random.random(256) #create random vector to take the DFT of
np.allclose(np.fft.fft(x),matrix_DFT(x)) #compare the result using numpy's built in function
#First we simulate a time series as the sum of a number of sinusoids each with a different frequency
N = 512 #The number of samples of the time series
tmin = -10 #The minimum value of the time coordinate
tmax = 10 #The maximum value of the time coordinate
t = np.linspace(tmin,tmax,N) #The time coordinate
f1 = 1.0 #The frequency of the first sinusoid
f2 = 2.0 #The frequency of the second sinusoid
f3 = 3.0 #The frequency of the third sinusoid
#Generate the signal
y = np.sin(2.0*np.pi*f1*t) + np.sin(2.0*np.pi*f2*t) + np.sin(2.0*np.pi*f3*t)
#Take the DFT
Y = matrix_DFT(y)
#Plot the absolute value, real and imaginary parts
plt.figure(figsize=(15, 6))
plt.subplot(121)
plt.stem(abs(Y))
plt.xlabel('$k$',fontsize=18)
plt.ylabel(r'$|Y_k|$',fontsize=18)
plt.subplot(122)
plt.stem(np.angle(Y))
plt.xlabel('$k$',fontsize=18)
plt.ylabel(r'phase$(Y_k)$',fontsize=18)
#Get the sampling frequency
delt = t[1] - t[0]
fs = 1.0/delt
k = np.arange(N)
fk = k*fs/N
plt.figure(figsize=(15, 6))
plt.subplot(121)
plt.stem(fk,abs(Y))
plt.xlabel('$f_k$',fontsize=18)
plt.ylabel(r'$|Y_k|$',fontsize=18)
plt.subplot(122)
plt.stem(fk,np.angle(Y))
plt.xlabel('$f_k$',fontsize=18)
plt.ylabel(r'phase$(Y_k)$',fontsize=18)
%timeit loop_DFT(x)
%timeit matrix_DFT(x)
%timeit np.fft.fft(x)
def one_layer_FFT(x):
"""An implementation of the 1D Cooley-Tukey FFT using one layer"""
N = x.size
if N%2>0:
print "Warning: length of x in not a power of two, returning DFT"
return matrix_DFT(x)
else:
X_even = matrix_DFT(x[::2])
X_odd = matrix_DFT(x[1::2])
factor = np.exp(-2j * np.pi * np.arange(N) / N)
return np.concatenate([X_even + factor[:N // 2] * X_odd, X_even + factor[N // 2:] * X_odd]) # N // 2: integer division so the slice index is an int under Python 3
np.allclose(np.fft.fft(x),one_layer_FFT(x))
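# Going further (a sketch, not part of the original text): applying the same
# even/odd split recursively gives the full O(N log N) Cooley-Tukey FFT.
def recursive_FFT(x):
    """Minimal recursive Cooley-Tukey FFT; assumes len(x) is a power of two."""
    N = x.size
    if N <= 2:
        return matrix_DFT(x)
    X_even = recursive_FFT(x[::2])
    X_odd = recursive_FFT(x[1::2])
    factor = np.exp(-2j * np.pi * np.arange(N) / N)
    return np.concatenate([X_even + factor[:N // 2] * X_odd,
                           X_even + factor[N // 2:] * X_odd])

np.allclose(np.fft.fft(x), recursive_FFT(x))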
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Block the output of all cores except for one
Step2: Define an md.export_cfg object
Step3: Screw dislocation
Step4: Create a $\langle110\rangle\times\langle112\rangle\times\frac{1}{2}\langle111\rangle$ cell
Step5: Remove half of the atoms and readjust the position of remaining
Step6: Readjust the postions
Step7: Replicating the unit cell
Step8: putting it all together
Step9: Edge dislocation
Step10: putting it all together
Step11: Putting it all together
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import mapp4py
from mapp4py import md
from lib.elasticity import rot, cubic, resize, displace, HirthEdge, HirthScrew
from mapp4py import mpi
import os, sys
if mpi().rank!=0:
sys.stdout = open(os.devnull, 'w') # keep the file open; a with-block would close it again immediately
xprt = md.export_cfg("");
sim=md.atoms.import_cfg('configs/Fe_300K.cfg');
nlyrs_fxd=2
a=sim.H[0][0];
b_norm=0.5*a*np.sqrt(3.0);
b=np.array([1.0,1.0,1.0])
s=np.array([1.0,-1.0,0.0])/np.sqrt(2.0)
sim.cell_change([[1,-1,0],[1,1,-2],[1,1,1]])
H=np.array(sim.H);
def _(x):
if x[2] > 0.5*H[2, 2] - 1.0e-8:
return False;
else:
x[2]*=2.0;
sim.do(_);
_ = np.full((3,3), 0.0)
_[2, 2] = - 0.5
sim.strain(_)
displace(sim,np.array([sim.H[0][0]/6.0,sim.H[1][1]/6.0,0.0]))
max_natms=100000
H=np.array(sim.H);
n_per_area=sim.natms/(H[0,0] * H[1,1]);
_ =np.sqrt(max_natms/n_per_area);
N0 = np.array([
np.around(_ / sim.H[0][0]),
np.around(_ / sim.H[1][1]),
1], dtype=np.int32)
sim *= N0;
H = np.array(sim.H);
H_new = np.array(sim.H);
H_new[1][1] += 50.0
resize(sim, H_new, np.full((3),0.5) @ H)
C_Fe=cubic(1.3967587463636366,0.787341583191591,0.609615090769241);
Q=np.array([np.cross(s,b)/np.linalg.norm(np.cross(s,b)),s/np.linalg.norm(s),b/np.linalg.norm(b)])
hirth = HirthScrew(rot(C_Fe,Q), rot(b*0.5*a,Q))
ctr = np.full((3),0.5) @ H_new;
s_fxd=0.5-0.5*float(nlyrs_fxd)/float(N0[1])
def _(x,x_d,x_dof):
sy=(x[1]-ctr[1])/H[1, 1];
x0=(x-ctr)/H[0, 0];
if sy>s_fxd or sy<=-s_fxd:
x_dof[1]=x_dof[2]=False;
x+=b_norm*hirth.ave_disp(x0)
else:
x+=b_norm*hirth.disp(x0)
sim.do(_)
H = np.array(sim.H);
H_inv = np.array(sim.B);
H_new = np.array(sim.H);
H_new[0,0]=np.sqrt(H[0,0]**2+(0.5*b_norm)**2)
H_new[2,0]=H[2,2]*0.5*b_norm/H_new[0,0]
H_new[2,2]=np.sqrt(H[2,2]**2-H_new[2,0]**2)
F = np.transpose(H_inv @ H_new);
sim.strain(F - np.identity(3))
xprt(sim, "dumps/screw.cfg")
def make_scrw(nlyrs_fxd,nlyrs_vel,vel):
#this is for 0K
#c_Fe=cubic(1.5187249951755375,0.9053185628093443,0.7249256807942608);
#this is for 300K
c_Fe=cubic(1.3967587463636366,0.787341583191591,0.609615090769241);
#N0=np.array([80,46,5],dtype=np.int32)
sim=md.atoms.import_cfg('configs/Fe_300K.cfg');
a=sim.H[0][0];
b_norm=0.5*a*np.sqrt(3.0);
b=np.array([1.0,1.0,1.0])
s=np.array([1.0,-1.0,0.0])/np.sqrt(2.0)
Q=np.array([np.cross(s,b)/np.linalg.norm(np.cross(s,b)),s/np.linalg.norm(s),b/np.linalg.norm(b)])
c0=rot(c_Fe,Q)
hirth = HirthScrew(rot(c_Fe,Q),np.dot(Q,b)*0.5*a)
sim.cell_change([[1,-1,0],[1,1,-2],[1,1,1]])
displace(sim,np.array([sim.H[0][0]/6.0,sim.H[1][1]/6.0,0.0]))
max_natms=1000000
n_per_vol=sim.natms/sim.vol;
_=np.power(max_natms/n_per_vol,1.0/3.0);
N1=np.full((3),0,dtype=np.int32);
for i in range(0,3):
N1[i]=int(np.around(_/sim.H[i][i]));
N0=np.array([N1[0],N1[1],1],dtype=np.int32);
sim*=N0;
sim.kB=8.617330350e-5
sim.create_temp(300.0,8569643);
H=np.array(sim.H);
H_new=np.array(sim.H);
H_new[1][1]+=50.0
resize(sim, H_new, np.full((3),0.5) @ H)
ctr=np.dot(np.full((3),0.5),H_new);
s_fxd=0.5-0.5*float(nlyrs_fxd)/float(N0[1])
s_vel=0.5-0.5*float(nlyrs_vel)/float(N0[1])
def _(x,x_d,x_dof):
sy=(x[1]-ctr[1])/H[1][1];
x0=(x-ctr)/H[0][0];
if sy>s_fxd or sy<=-s_fxd:
x_d[1]=0.0;
x_dof[1]=x_dof[2]=False;
x+=b_norm*hirth.ave_disp(x0)
else:
x+=b_norm*hirth.disp(x0)
if sy<=-s_vel or sy>s_vel:
x_d[2]=2.0*sy*vel;
sim.do(_)
H = np.array(sim.H);
H_inv = np.array(sim.B);
H_new = np.array(sim.H);
H_new[0,0]=np.sqrt(H[0,0]**2+(0.5*b_norm)**2)
H_new[2,0]=H[2,2]*0.5*b_norm/H_new[0,0]
H_new[2,2]=np.sqrt(H[2,2]**2-H_new[2,0]**2)
F = np.transpose(H_inv @ H_new);
sim.strain(F - np.identity(3))
return N1[2],sim;
sim=md.atoms.import_cfg('configs/Fe_300K.cfg');
nlyrs_fxd=2
a=sim.H[0][0];
b_norm=0.5*a*np.sqrt(3.0);
b=np.array([1.0,1.0,1.0])
s=np.array([1.0,-1.0,0.0])/np.sqrt(2.0)
sim.cell_change([[1,1,1],[1,-1,0],[1,1,-2]])
H=np.array(sim.H);
def _(x):
if x[0] > 0.5*H[0, 0] - 1.0e-8:
return False;
else:
x[0]*=2.0;
sim.do(_);
_ = np.full((3,3), 0.0)
_[0,0] = - 0.5
sim.strain(_)
displace(sim,np.array([0.0,sim.H[1][1]/4.0,0.0]))
max_natms=100000
H=np.array(sim.H);
n_per_area=sim.natms/(H[0, 0] * H[1, 1]);
_ =np.sqrt(max_natms/n_per_area);
N0 = np.array([
np.around(_ / sim.H[0, 0]),
np.around(_ / sim.H[1, 1]),
1], dtype=np.int32)
sim *= N0;
# remove one layer along ... direction
H=np.array(sim.H);
frac=H[0,0] /N0[0]
def _(x):
if x[0] < H[0, 0] /N0[0] and x[1] >0.5*H[1, 1]:
return False;
sim.do(_)
H = np.array(sim.H);
H_new = np.array(sim.H);
H_new[1][1] += 50.0
resize(sim, H_new, np.full((3),0.5) @ H)
C_Fe=cubic(1.3967587463636366,0.787341583191591,0.609615090769241);
_ = np.cross(b,s)
Q = np.array([b/np.linalg.norm(b), s/np.linalg.norm(s), _/np.linalg.norm(_)])
hirth = HirthEdge(rot(C_Fe,Q), rot(b*0.5*a,Q))
_ = (1.0+0.5*(N0[0]-1.0))/N0[0];
ctr = np.array([_,0.5,0.5]) @ H_new;
frac = H[0][0]/N0[0]
s_fxd=0.5-0.5*float(nlyrs_fxd)/float(N0[1])
def _(x,x_d,x_dof):
sy=(x[1]-ctr[1])/H[1, 1];
x0=(x-ctr);
if(x0[1]>0.0):
x0/=(H[0, 0]-frac)
else:
x0/= H[0, 0]
if sy>s_fxd or sy<=-s_fxd:
x+=b_norm*hirth.ave_disp(x0);
x_dof[0]=x_dof[1]=False;
else:
x+=b_norm*hirth.disp(x0);
x[0]-=0.25*b_norm;
sim.do(_)
H = np.array(sim.H)
H_new = np.array(sim.H);
H_new[0, 0] -= 0.5*b_norm;
resize(sim, H_new, np.full((3),0.5) @ H)
xprt(sim, "dumps/edge.cfg")
def make_edge(nlyrs_fxd,nlyrs_vel,vel):
#this is for 0K
#c_Fe=cubic(1.5187249951755375,0.9053185628093443,0.7249256807942608);
#this is for 300K
c_Fe=cubic(1.3967587463636366,0.787341583191591,0.609615090769241);
#N0=np.array([80,46,5],dtype=np.int32)
sim=md.atoms.import_cfg('configs/Fe_300K.cfg');
a=sim.H[0][0];
b_norm=0.5*a*np.sqrt(3.0);
b=np.array([1.0,1.0,1.0])
s=np.array([1.0,-1.0,0.0])/np.sqrt(2.0)
# create rotation matrix
_ = np.cross(b,s)
Q=np.array([b/np.linalg.norm(b), s/np.linalg.norm(s), _/np.linalg.norm(_)])
hirth = HirthEdge(rot(c_Fe,Q),np.dot(Q,b)*0.5*a)
# create a unit cell
sim.cell_change([[1,1,1],[1,-1,0],[1,1,-2]])
H=np.array(sim.H);
def f0(x):
if x[0]>0.5*H[0][0]-1.0e-8:
return False;
else:
x[0]*=2.0;
sim.do(f0);
_ = np.full((3,3), 0.0)
_[0,0] = - 0.5
sim.strain(_)
displace(sim,np.array([0.0,sim.H[1][1]/4.0,0.0]))
max_natms=1000000
n_per_vol=sim.natms/sim.vol;
_=np.power(max_natms/n_per_vol,1.0/3.0);
N1=np.full((3),0,dtype=np.int32);
for i in range(0,3):
N1[i]=int(np.around(_/sim.H[i][i]));
N0=np.array([N1[0],N1[1],1],dtype=np.int32);
N0[0]+=1;
sim*=N0;
# remove one layer along ... direction
H=np.array(sim.H);
frac=H[0][0]/N0[0]
def _(x):
if x[0] < H[0][0]/N0[0] and x[1]>0.5*H[1][1]:
return False;
sim.do(_)
sim.kB=8.617330350e-5
sim.create_temp(300.0,8569643);
H = np.array(sim.H);
H_new = np.array(sim.H);
H_new[1][1] += 50.0
ctr=np.dot(np.full((3),0.5),H);
resize(sim,H_new, np.full((3),0.5) @ H)
l=(1.0+0.5*(N0[0]-1.0))/N0[0];
ctr=np.dot(np.array([l,0.5,0.5]),H_new);
frac=H[0][0]/N0[0]
s_fxd=0.5-0.5*float(nlyrs_fxd)/float(N0[1])
s_vel=0.5-0.5*float(nlyrs_vel)/float(N0[1])
def f(x,x_d,x_dof):
sy=(x[1]-ctr[1])/H[1][1];
x0=(x-ctr);
if(x0[1]>0.0):
x0/=(H[0][0]-frac)
else:
x0/= H[0][0]
if sy>s_fxd or sy<=-s_fxd:
x_d[1]=0.0;
x_dof[0]=x_dof[1]=False;
x+=b_norm*hirth.ave_disp(x0);
else:
x+=b_norm*hirth.disp(x0);
if sy<=-s_vel or sy>s_vel:
x_d[0]=2.0*sy*vel;
x[0]-=0.25*b_norm;
sim.do(f)
H = np.array(sim.H)
H_new = np.array(sim.H);
H_new[0, 0] -= 0.5*b_norm;
resize(sim, H_new, np.full((3),0.5) @ H)
return N1[2], sim;
nlyrs_fxd=2
nlyrs_vel=7;
vel=-0.004;
N,sim=make_edge(nlyrs_fxd,nlyrs_vel,vel)
xprt(sim, "dumps/edge.cfg")
_ = np.array([[-1,1,0],[1,1,1],[1,1,-2]], dtype=float); # plain float; np.float is deprecated in recent numpy
Q = np.linalg.inv(np.sqrt(_ @ _.T)) @ _;
C = rot(cubic(1.3967587463636366,0.787341583191591,0.609615090769241),Q)
B = np.linalg.inv(
np.array([
[C[0, 0, 0, 0], C[0, 0, 1, 1], C[0, 0, 0, 1]],
[C[0, 0, 1, 1], C[1, 1, 1, 1], C[1, 1, 0, 1]],
[C[0, 0, 0, 1], C[1, 1, 0, 1], C[0, 1, 0, 1]]
]
))
_ = np.roots([B[0, 0], -2.0*B[0, 2],2.0*B[0, 1]+B[2, 2], -2.0*B[1, 2], B[1, 1]])
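# These are the complex roots of the quartic characteristic equation of plane
# anisotropic elasticity (Lekhnitskii formalism),
#   B11*mu^4 - 2*B16*mu^3 + (2*B12 + B66)*mu^2 - 2*B26*mu + B22 = 0,
# written here with the 0-based Voigt indexing of the reduced compliance B.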
mu = np.array([_[0],0.0]);
if np.absolute(np.conjugate(mu[0]) - _[1]) > 1.0e-12:
mu[1] = _[1];
else:
mu[1] = _[2]
alpha = np.real(mu);
beta = np.imag(mu);
p = B[0,0] * mu**2 - B[0,2] * mu + B[0, 1]
q = B[0,1] * mu - B[0, 2] + B[1, 1]/ mu
K = np.stack([p, q]) * np.array([mu[1], mu[0]]) / (mu[1] - mu[0]) # np.array needs a list; the original passed mu[0] as the dtype argument
K_r = np.real(K)
K_i = np.imag(K)
Tr = np.stack([
np.array(np.array([[1.0, alpha[0]], [0.0, beta[0]]])),
np.array([[1.0, alpha[1]], [0.0, beta[1]]])
], axis=1)
def u_f0(x): return np.sqrt(np.sqrt(x[0] * x[0] + x[1] * x[1]) + x[0])
def u_f1(x): return np.sqrt(np.sqrt(x[0] * x[0] + x[1] * x[1]) - x[0]) * np.sign(x[1])
def disp(x):
_ = Tr @ x
return K_r @ u_f0(_) + K_i @ u_f1(_)
# The lines that followed duplicated the Q/C construction above and called an
# undefined helper `crack(C)`; the `disp` function defined above (built from
# K_r and K_i) is the crack-tip displacement field used below.
n = 300;
r = 10;
disp_scale = 0.3;
n0 = int(np.round(n/ (1 +np.pi), ))
n1 = n - n0
xs = np.concatenate((
np.stack([np.linspace(0, -r , n0), np.full((n0,), -1.e-8)]),
r * np.stack([np.cos(np.linspace(-np.pi, np.pi , n1)),np.sin(np.linspace(-np.pi, np.pi , n1))]),
np.stack([np.linspace(-r, 0 , n0), np.full((n0,), 1.e-8)]),
), axis =1)
xs_def = xs + disp_scale * disp(xs)
fig, ax = plt.subplots(figsize=(10.5,5), ncols = 2)
ax[0].plot(xs[0], xs[1], "b-", label="non-deformed");
ax[1].plot(xs_def[0], xs_def[1], "r-.", label="deformed");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Arrays are similar to Python lists, but all elements must be of the same data type, and they are faster than lists
Step2: Let's look at some of the functionality
Step3: Vector Operation
Step4: vector [+-*/] scalar
Step5: vector & boolean vector
Step6: num[bl]: what will it return?
Step7: Find all elements greater than 100 in num
Step8: <h5>All elements less than 50?</h5>
Step9: In-place operations in numpy (the difference between += and +)
Step10: <h5>This happens because a and b both point to the same array; the + operation creates a new array and a is re-pointed to it, so b remains unaffected</h5>
Step11: Pandas Series
Step12: <h6>See all basic results using the describe() function</h6>
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
num = np.array([3,4,2,5,7,23,56,23,7,23,89,43,676,43])
num
print "Mean :",num.mean()
print "sum :",num.sum()
print "max :",num.max()
print "std :",num.std()
#slicing
num[:5]
#find index of any element let say max
print "index of max :",num.argmax()
print "data Type of array :",num.dtype
a=np.array([5,6,15])
b=np.array([5,4,-5])
# Addition
print "{} + {} = {}".format(a,b,a+b)
print "{} * {} = {}".format(a,b,a*b)
print "{} / {} = {}".format(a,b,a/b)
# If the sizes mismatch, an error occurs
b=np.array([5,4,-5,5])
print "{} + {} = {}".format(a,b,a+b)
print "{} + {} = {}".format(a,3,a+3)
print "{} * {} = {}".format(a,3,a*3)
print "{} / {} = {}".format(a,3,a/3)
num=np.array([5,6,15,65,32,656,23,435,2,45,21])
bl=np.array([False,True,True,False,True,False,True,False,True,True,False])
num[6]
num[bl]
num[num>100]
num[num<50]
a=np.array([5,6,15])
b=a
a += 2
print(b)
print("This happens because a and b point to the same array, and += is an in-place operation, so the change is visible through both names")
a=np.array([5,6,15])
b=a
a = a + 2
print(b)
a=np.array([5,6,15])
b=a[:3]
b[0]=1000
print a,"Reason is similar as +="
import pandas as pd
num = pd.Series([3,4,2,5,7,23,56,23,7,23,89,43,676,43])
num
num.describe()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.DataFrame({'DateTime': ['2000-01-04', '2000-01-05', '2000-01-06', '2000-01-07'],
'Close': [1460, 1470, 1480, 1450]})
def g(df):
df['label'] = df.Close.diff().fillna(1).gt(0).astype(int)
return df
df = g(df.copy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
Step2: Let's check out the first 100 characters, make sure everything is peachy. According to the American Book Review, this is the 6th best first line of a book ever.
Step3: And we can see the characters encoded as integers.
Step4: Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.
Step5: Making training mini-batches
Step6: Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
Step7: If you implemented get_batches correctly, the above output should look something like
Step8: LSTM Cell
Step9: RNN Output
Step10: Training loss
Step11: Optimizer
Step12: Build the network
Step13: Hyperparameters
Step14: Time for training
Step15: Saved checkpoints
Step16: Sampling
Step17: Here, pass in the path to a checkpoint and sample from the network.
|
<ASSISTANT_TASK:>
Python Code:
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
with open('anna.txt', 'r') as f:
text=f.read()
vocab = sorted(set(text))
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
encoded[:100]
len(vocab)
def get_batches(arr, n_seqs, n_steps):
'''Create a generator that returns batches of size
n_seqs x n_steps from arr.
Arguments
---------
arr: Array you want to make batches from
n_seqs: Batch size, the number of sequences per batch
n_steps: Number of sequence steps per batch
'''
# Get the number of characters per batch and number of batches we can make
characters_per_batch = n_seqs * n_steps
n_batches = len(arr)//characters_per_batch
# Keep only enough characters to make full batches
arr = arr[:n_batches * characters_per_batch]
# Reshape into n_seqs rows
arr = arr.reshape((n_seqs, -1))
for n in range(0, arr.shape[1], n_steps):
# The features
x = arr[:, n:n+n_steps]
# The targets, shifted by one
y = np.zeros_like(x)
y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
yield x, y
batches = get_batches(encoded, 10, 50)
x, y = next(batches)
encoded[:100]
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
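# Quick shape check (illustrative): with batch_size=10 and num_steps=50, each
# batch should be a 10 x 50 array.
print(x.shape, y.shape)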
def build_inputs(batch_size, num_steps):
''' Define placeholders for inputs, targets, and dropout
Arguments
---------
batch_size: Batch size, number of sequences per batch
num_steps: Number of sequence steps in a batch
'''
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
# Keep probability placeholder for drop out layers
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return inputs, targets, keep_prob
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
''' Build LSTM cell.
Arguments
---------
keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability
lstm_size: Size of the hidden layers in the LSTM cells
num_layers: Number of LSTM layers
batch_size: Batch size
'''
### Build the LSTM Cell
def build_cell(lstm_size, keep_prob):
# Use a basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# Add dropout to the cell
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
return drop
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([build_cell(lstm_size, keep_prob) for _ in range(num_layers)])
initial_state = cell.zero_state(batch_size, tf.float32)
return cell, initial_state
def build_output(lstm_output, in_size, out_size):
''' Build a softmax layer, return the softmax output and logits.
Arguments
---------
x: Input tensor
in_size: Size of the input tensor, for example, size of the LSTM cells
out_size: Size of this softmax layer
'''
# Reshape output so it's a bunch of rows, one row for each step for each sequence.
# That is, the shape should be batch_size*num_steps rows by lstm_size columns
seq_output = tf.concat(lstm_output, axis=1)
x = tf.reshape(seq_output, [-1, in_size])
# Connect the RNN outputs to a softmax layer
with tf.variable_scope('softmax'):
softmax_w = tf.Variable(tf.truncated_normal((in_size, out_size), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(out_size))
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and sequence
logits = tf.matmul(x, softmax_w) + softmax_b
# Use softmax to get the probabilities for predicted characters
out = tf.nn.softmax(logits, name='predictions')
return out, logits
def build_loss(logits, targets, lstm_size, num_classes):
''' Calculate the loss from the logits and the targets.
Arguments
---------
logits: Logits from final fully connected layer
targets: Targets for supervised learning
lstm_size: Number of LSTM hidden units
num_classes: Number of classes in targets
'''
# One-hot encode targets and reshape to match logits, one row per batch_size per step
y_one_hot = tf.one_hot(targets, num_classes)
y_reshaped = tf.reshape(y_one_hot, logits.get_shape())
# Softmax cross entropy loss
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
loss = tf.reduce_mean(loss)
return loss
def build_optimizer(loss, learning_rate, grad_clip):
''' Build optmizer for training, using gradient clipping.
Arguments:
loss: Network loss
learning_rate: Learning rate for optimizer
'''
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
return optimizer
class CharRNN:
def __init__(self, num_classes, batch_size=64, num_steps=50,
lstm_size=128, num_layers=2, learning_rate=0.001,
grad_clip=5, sampling=False):
# When we're using this network for sampling later, we'll be passing in
# one character at a time, so providing an option for that
if sampling == True:
batch_size, num_steps = 1, 1
else:
batch_size, num_steps = batch_size, num_steps
tf.reset_default_graph()
# Build the input placeholder tensors
self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
# Build the LSTM cell
cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
### Run the data through the RNN layers
# First, one-hot encode the input tokens
x_one_hot = tf.one_hot(self.inputs, num_classes)
# Run each sequence step through the RNN and collect the outputs
outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
self.final_state = state
# Get softmax predictions and logits
self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
# Loss and optimizer (with gradient clipping)
self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)
self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
batch_size = 100 # Sequences per batch
num_steps = 100 # Number of sequence steps per batch
lstm_size = 512 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.001 # Learning rate
keep_prob = 0.5 # Dropout keep probability
epochs = 20
# Save every N iterations
save_every_n = 200
model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
lstm_size=lstm_size, num_layers=num_layers,
learning_rate=learning_rate)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/______.ckpt')
counter = 0
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for x, y in get_batches(encoded, batch_size, num_steps):
counter += 1
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.loss,
model.final_state,
model.optimizer],
feed_dict=feed)
end = time.time()
print('Epoch: {}/{}... '.format(e+1, epochs),
'Training Step: {}... '.format(counter),
'Training loss: {:.4f}... '.format(batch_loss),
'{:.4f} sec/batch'.format((end-start)))
if (counter % save_every_n == 0):
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
tf.train.get_checkpoint_state('checkpoints')
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
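# Hedged sanity check for pick_top_n on a toy distribution (values are
# illustrative, not from training): with top_n=2 only the two most
# probable indices (2 and 3) can ever be drawn.
_toy_preds = np.array([[0.05, 0.15, 0.5, 0.3]])
assert pick_top_n(_toy_preds, vocab_size=4, top_n=2) in (2, 3)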
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
samples = [c for c in prime]
model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
tf.train.latest_checkpoint('checkpoints')
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i600_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i1200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A mini-example
Step2: Visualizing
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np # numerical computing module
import matplotlib.pyplot as plt # plotting module
# this line makes the plots render inline in the notebook
import seaborn as sns
%matplotlib inline
df=pd.read_csv('files/ejemplo.csv')
print(df.shape)
df.head()
sns.pairplot(df,hue='Tipo')
plt.title('Data Distributions')
plt.show()
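# A small hedged follow-up: summarize the class balance of the 'Tipo' column
# that the pairplot above uses as its hue.
print(df['Tipo'].value_counts())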
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parse results
Step2: PageRank Seeds Percentage
Step3: Avg adopters per seed comparison
Step4: Eigenvector Seeds Percentage
Step5: Avg adopters per seed comparison
Step6: Betweenness Seeds Percentage
Step7: Avg adopters per seed comparison
Step8: 100 runs adopters comparison
Step9: Centrality Measures Averages
Step10: Eigenvector avg adopters and seed
Step11: Betweenness avg adopters and seed
|
<ASSISTANT_TASK:>
Python Code:
#!/usr/bin/python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from stats import parse_results, get_percentage, get_avg_per_seed, draw_pie, draw_bars, draw_bars_comparison, draw_avgs
pr, eigen, bet = parse_results('test_genws.txt')
draw_pie(get_percentage(pr))
draw_bars_comparison('Avg adopters per seed', 'Avg adopters', np.array(get_avg_per_seed(pr)+[(0, np.mean(pr[:,1]))]))
draw_pie(get_percentage(eigen))
draw_bars_comparison('Avg adopters per seed', 'Avg adopters', np.array(get_avg_per_seed(eigen)+[(0, np.mean(eigen[:,1]))]))
draw_pie(get_percentage(bet))
draw_bars_comparison('Avg adopters per seed', 'Avg adopters', np.array(get_avg_per_seed(bet)+[(0, np.mean(bet[:,1]))]))
draw_bars(np.sort(pr.view('i8,i8'), order=['f0'], axis=0).view(np.int),
np.sort(eigen.view('i8,i8'), order=['f0'], axis=0).view(np.int),
np.sort(bet.view('i8,i8'), order=['f0'], axis=0).view(np.int))
pr_mean = np.mean(pr[:,1])
pr_mean_seed = np.mean(pr[:,0])
print('Avg Seed:', pr_mean_seed, 'Avg adopters:', pr_mean)
eigen_mean = np.mean(eigen[:,1])
eigen_mean_seed = np.mean(eigen[:,0])
print('Avg Seed:', eigen_mean_seed, 'Avg adopters:', eigen_mean)
bet_mean = np.mean(bet[:,1])
bet_mean_seed = np.mean(bet[:,0])
print('Avg Seed:', bet_mean_seed, 'Avg adopters:', bet_mean)
draw_avgs([pr_mean, eigen_mean, bet_mean])
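# Hedged follow-up sketch: print the three averages side by side as a quick
# textual companion to the bar chart above.
for name, avg in [('PageRank', pr_mean), ('Eigenvector', eigen_mean), ('Betweenness', bet_mean)]:
    print(name, avg)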
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'test-institute-3', 'sandbox-2', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
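# Hypothetical illustration of how a value would be recorded here (the string
# below is a placeholder, not real model documentation):
# DOC.set_value("Short free-text overview of the coupled model")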
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: P2. Check Permutation
Step2: P3. URLify
Step3: P4. Palindrome Permutation
Step4: P5. One Away
Step5: P6. String Compression
Step6: P7. Rotate Matrix
Step7: P8. Zero Matrix
|
<ASSISTANT_TASK:>
Python Code:
# With Hashmap.
# Time Complexity: O(n)
def if_unique(string):
chr_dict = {}
for char in string:
if char not in chr_dict:
chr_dict[char] = 1
else:
return False
return True
# Without additional memory.
# Time Complexity: O(n^2)
def if_unique_m(string):
for idx, char in enumerate(string):
for j in range(idx + 1, len(string)):
if char == string[j]:
return False
return True
# Test cases.
print(if_unique("1234"), if_unique_m("1234"))
print(if_unique("12344"), if_unique_m("12344"))
print(if_unique("1214"), if_unique_m("1214"))
print(if_unique("1"), if_unique_m("1"))
print(if_unique(""), if_unique_m(""))
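# Hedged alternative sketch: the same uniqueness check via a set,
# O(n) time at the cost of O(n) extra space.
def if_unique_set(string):
    return len(set(string)) == len(string)
print(if_unique_set("1234"), if_unique_set("12344"))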
# Using a hashmap.
# Checking if str1 is a permutation of str2.
# Assumptions: The strings can have repeated characters.
# Time Complexity: O(n)
def if_permute(str1, str2):
if len(str1) != len(str2):
return False
def get_chr_dict(string):
chr_dict = {}
for char in string:
if char not in chr_dict:
chr_dict[char] = 1
else:
chr_dict[char] += 1
return chr_dict
str1_d = get_chr_dict(str1) # String 1.
str2_d = get_chr_dict(str2) # String 2.
# Compare dictionaries.
for char in str1_d:
if char not in str2_d or str2_d[char] != str1_d[char]:
return False
return True
# Test Cases
print(if_permute("", ""))
print(if_permute("abc", "abc"))
print(if_permute("abbc", "abbc"))
print(if_permute("abcc", "abcc"))
print(if_permute("aaa", "aaa"))
print(if_permute("aaad", "aaac"))
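# Hedged alternative sketch: two strings are permutations of each other
# exactly when their sorted character sequences match, O(n log n) time.
def if_permute_sorted(str1, str2):
    return sorted(str1) == sorted(str2)
print(if_permute_sorted("abc", "bca"), if_permute_sorted("abc", "abd"))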
# Replace spaces with %20 characters.
# Time Complexity: O(n)
def replace_space(string):
parts = string.split(" ")
url = ""
for p in parts:
if p != "":
url += p + "%20"
return url[:-3]
print(replace_space("Mr John Smith "))
print(replace_space(""))
print(replace_space(" John Smith"))
# Build dictionary of all characters in the string and check if all even.
def check_palin_permute(string):
c_dict = {}
for char in string:
if char is not " ":
if char in c_dict:
c_dict[char] += 1
else:
c_dict[char] = 1
num_1 = 0
for char in c_dict:
if c_dict[char]%2 == 1:
num_1 += 1
if num_1 > 1:
return False
return True
print(check_palin_permute("tact coa"))
# Check the length of the strings to find which operation needs to be performed i.e. insert, delete or replace.
# Time Complexity: O(n)
def edit_distance(str1, str2) -> bool:
    if abs(len(str1) - len(str2)) > 1:
        return False
    i = 0; j = 0; edits = 0
    while i < len(str1) and j < len(str2):
        if str1[i] != str2[j]:  # Either replace or skip a character.
            edits += 1
            if edits > 1:
                return False
            if len(str1) > len(str2):    # Deletion: advance only the longer string.
                i += 1
                continue
            elif len(str1) < len(str2):  # Insertion: advance only the longer string.
                j += 1
                continue
            # Equal lengths: treat as a replacement and advance both pointers.
        i += 1; j += 1
    return True
print(edit_distance("pale", "ple"))
print(edit_distance("pale", "bake"))
print(edit_distance("pales", "pale"))
print(edit_distance("pale", "bale"))
print(edit_distance("pales", "bale"))
# Perform run-length compression on a string; return the original string
# when compression would not make it shorter.
# Time Complexity: O(n)
def compress(string: str) -> str:
    if len(string) < 2:
        return string
    com_str = ""
    count = 1
    for i in range(1, len(string)):
        if string[i] == string[i-1]:
            count += 1
        else:
            com_str += string[i-1] + str(count)
            count = 1  # Reset count for the new run.
    com_str += string[-1] + str(count)  # Flush the final run.
    if len(com_str) >= len(string):
        return string
    return com_str
print(compress("aabbbcdefgFFFFFFFFFc"))
print(compress("aabbbcdefgFFFFFFFFF"))
print(compress("aabbbcdefgFFFF"))
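# Extra hedged check: when compression would not shorten the string,
# the original is returned unchanged ("a1b1c1" is longer than "abc").
print(compress("abc"))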
# Rotate a square matrix 90 degrees clockwise (builds a new matrix rather
# than rotating in-place).
# Given: The matrix is a square matrix.
# Time Complexity: O(n^2)
# Space Complexity: O(n^2)
from typing import List
Matrix = List[List[int]]
def rotate_matrix(mat: Matrix) -> Matrix:
mat_size = (len(mat), len(mat[0]))
# Create Matrix of equal size.
rot = [[] for i in range(0 , mat_size[0])]
for i in range(mat_size[0] - 1, -1, -1):
for j in range(0, mat_size[1]):
rot[j].append(mat[i][j])
return rot
rotate_matrix([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
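# Hedged sanity check: the call above is a 90-degree clockwise rotation.
assert rotate_matrix([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) == [[6, 3, 0], [7, 4, 1], [8, 5, 2]]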
# NOTE: Zeroing rows/columns in place while scanning is flawed -- the zeros we
# write would trigger further zeroing until the whole matrix became zero.
# Instead, first record every row and column containing an original zero,
# then zero them out in a second pass.
# Make a zero matrix from a given matrix.
# Time complexity: O(n^2)
Matrix = List[List[int]]
def zero_matrix(mat: Matrix) -> Matrix:
    rows, cols = len(mat), len(mat[0])
    zero_rows, zero_cols = set(), set()
    # First pass: find the rows and columns that contain a zero.
    for i in range(rows):
        for j in range(cols):
            if mat[i][j] == 0:
                zero_rows.add(i)
                zero_cols.add(j)
    # Second pass: zero out every recorded row and column.
    for i in range(rows):
        for j in range(cols):
            if i in zero_rows or j in zero_cols:
                mat[i][j] = 0
    return mat
print(zero_matrix([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's look at the first few lines.
Step2: Read about the column names and the meaning of the ID values at the data set's web site.
Step3: Let's just replace the spaces with commas, using unix. Read about the tr unix command at Linux TR Command Examples
Step4: To see this data, let's try plotting each column as a separate curve on the same axes.
Step5: Each sample has 8 attributes, so each sample is a point in 8-dimensional space. I wonder how well the samples "clump" in those 8 dimensions. Let's try clustering them with the k-means algorithm.
Step6: Now we must find all samples that are closest to the first center, and those that are closest to the second center.
Step7: But what if we want to subtract every element of b from every element of a?
Step8: However, we can ask numpy to do this duplication for us if we reshape a to be a column vector and leave b as a row vector.
Step9: The single row vector a is duplicated for as many rows as there are in b! We can use this to calculate the squared distance between a center and every sample.
Step10: Let's define $J$ from the book, which is a performance measure being minimized by k-means. It is defined on page 424 as $J = \sum_{n=1}^{N} \sum_{k=1}^{K} r_{nk} \lVert \mathbf{x}_n - \boldsymbol{\mu}_k \rVert^2$, where $r_{nk}$ is 1 if sample $n$ is assigned to cluster $k$ and 0 otherwise.
Step11: So, the matrix form is 10 times faster!
Step12: This will be a little weird, and hard to understand, but by adding an empty dimension to the centers array, numpy broadcasting does all the work for us.
Step13: Now we have a 2 x 5 array with the first row containing the squared distance from the first center to each of the five data samples, and the second row containing the squared distances from the second center to each of the five data samples.
Step14: Let's define a function named calcJ to do this calculation.
|
<ASSISTANT_TASK:>
Python Code:
!wget http://archive.ics.uci.edu/ml/machine-learning-databases/00299/StoneFlakes.dat
!head StoneFlakes.dat
import pandas
d = pandas.read_csv(open('StoneFlakes.dat'))
d[:5]
d = pandas.read_csv(open('StoneFlakes.dat'),sep=',')
d[:5]
! tr -s ' ' ',' < StoneFlakes.dat > StoneFlakes2.dat
! head StoneFlakes2.dat
d = pandas.read_csv(open('StoneFlakes2.dat'))
d[:5]
d = pandas.read_csv(open('StoneFlakes2.dat'),na_values='?')
d[:5]
d = pandas.read_csv(open('StoneFlakes2.dat'),na_values='?',error_bad_lines=False)
d[:5]
d[:5].isnull()
d[:5].isnull().any(axis=1)
d[:5].isnull().any(axis=1) == False
print(d.shape)
d = d[d.isnull().any(axis=1)==False]
print(d.shape)
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
data = d.iloc[:,1:].values
data.shape
data[:5,:]
plt.plot(data);
np.random.choice(range(data.shape[0]),2, replace=False) # data.shape[0] is number of rows, or samples
np.random.choice(range(data.shape[0]),2, replace=False)
centersIndex = np.random.choice(range(data.shape[0]),2, replace=False)
centersIndex
centers = data[centersIndex,:]
centers
a = np.array([1,2,3])
b = np.array([10,20,30])
a, b
a-b
np.resize(a,(3,3))
np.resize(b, (3,3))
np.resize(a,(3,3)).T
np.resize(a,(3,3)).T - np.resize(b,(3,3))
a[:,np.newaxis]
a.reshape((-1,1))
a.shape, a[:,np.newaxis].shape
a[:,np.newaxis] - b
a = np.array([1,2,3])
b = np.array([[10,20,30],[40,50,60]])
print(a)
print(b)
b-a
centers[0,:]
np.sum((centers[0,:] - data)**2, axis=1)
np.sum((centers[1,:] - data)**2, axis=1) > np.sum((centers[0,:] - data)**2, axis=1)
centers
centers[:,np.newaxis,:].shape, data.shape
(centers[:,np.newaxis,:] - data).shape
np.sum((centers[:,np.newaxis,:] - data)**2, axis=2).shape
np.argmin(np.sum((centers[:,np.newaxis,:] - data)**2, axis=2), axis=0)
cluster = np.argmin(np.sum((centers[:,np.newaxis,:] - data)**2, axis=2), axis=0)
cluster
data[cluster==0,:].mean(axis=0)
data[cluster==1,:].mean(axis=0)
k = 2
for i in range(k):
centers[i,:] = data[cluster==i,:].mean(axis=0)
centers
def kmeans(data, k = 2, n = 5):
# Initial centers
centers = data[np.random.choice(range(data.shape[0]),k, replace=False), :]
# Repeat n times
for iteration in range(n):
# Which center is each sample closest to?
closest = np.argmin(np.sum((centers[:,np.newaxis,:] - data)**2, axis=2), axis=0)
# Update cluster centers
for i in range(k):
centers[i,:] = data[closest==i,:].mean(axis=0)
return centers
kmeans(data,2)
kmeans(data,2)
a = np.linspace(0,10,30).reshape(3,10)
a
b = np.arange(30).reshape(3,10)
b
result = np.zeros((3,10))
for i in range(3):
for j in range(10):
result[i,j] = a[i,j] + b[i,j]
result
%%timeit
result = np.zeros((3,10))
for i in range(3):
for j in range(10):
result[i,j] = a[i,j] + b[i,j]
result = a + b
result
%%timeit
result = a + b
centers = np.array([[1,2],[5,4]])
centers
data = np.array([[3,2],[4,6],[7,3],[4,6],[1,8]])
data
centers[:,np.newaxis,:]
centers[:,np.newaxis,:].shape
data.shape
diffsq = (centers[:,np.newaxis,:] - data)**2
diffsq
diffsq.shape
np.sum(diffsq,axis=2)
np.min(np.sum(diffsq,axis=2), axis=0)
np.sum(np.min(np.sum(diffsq,axis=2), axis=0))
def calcJ(data,centers):
diffsq = (centers[:,np.newaxis,:] - data)**2
return np.sum(np.min(np.sum(diffsq,axis=2), axis=0))
calcJ(data,centers)
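# Hedged hand-check: with the toy data and centers above, the per-sample
# minimum squared distances are 4, 5, 5, 5 and 32, so J should equal 51.
assert calcJ(data, centers) == 51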
def kmeans(data, k = 2, n = 5):
# Initialize centers and list J to track performance metric
centers = data[np.random.choice(range(data.shape[0]),k,replace=False), :]
J = []
# Repeat n times
for iteration in range(n):
# Which center is each sample closest to?
sqdistances = np.sum((centers[:,np.newaxis,:] - data)**2, axis=2)
closest = np.argmin(sqdistances, axis=0)
# Calculate J and append to list J
J.append(calcJ(data,centers))
# Update cluster centers
for i in range(k):
centers[i,:] = data[closest==i,:].mean(axis=0)
# Calculate J one final time and return results
J.append(calcJ(data,centers))
return centers,J,closest
centers,J,closest = kmeans(data,2)
J
plt.plot(J);
centers,J,closest = kmeans(data,2,10)
plt.plot(J);
centers,J,closest = kmeans(data,3,10)
plt.plot(J);
small = np.array([[8,7],[7,6.6],[9.2,8.3],[6.8,9.2], [1.2,3.2],[4.8,2.3],[3.4,3.2],[3.2,5.6],[1,4],[2,2.2]])
plt.scatter(small[:,0],small[:,1]);
c,J,closest = kmeans(small,2,n=2)
c
closest
plt.scatter(small[:,0], small[:,1], s=80, c=closest, alpha=0.5);
plt.scatter(c[:,0],c[:,1],s=80,c="green",alpha=0.5);
c,J,closest = kmeans(small,2,n=2)
plt.scatter(small[:,0], small[:,1], s=80, c=closest, alpha=0.5);
plt.scatter(c[:,0],c[:,1],s=80,c="green",alpha=0.5);
J
import gzip
import pickle
with gzip.open('mnist.pkl.gz', 'rb') as f:
train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
# zero = train_set[0][1,:].reshape((28,28,1))
# one = train_set[0][3,:].reshape((28,28,1))
# two = train_set[0][5,:].reshape((28,28,1))
# four = train_set[0][20,:].reshape((28,28,1))
X = train_set[0]
T = train_set[1].reshape((-1,1))
Xtest = test_set[0]
Ttest = test_set[1].reshape((-1,1))
X.shape, T.shape, Xtest.shape, Ttest.shape
c,J,closest = kmeans(X, k=10, n=20)
plt.plot(J)
c.shape
for i in range(10):
plt.subplot(2,5,i+1)
plt.imshow(-c[i,:].reshape((28,28)), interpolation='nearest', cmap='gray')
plt.axis('off')
c,J,closest = kmeans(X, k=10, n=20)
plt.plot(J)
plt.figure()
for i in range(10):
plt.subplot(2,5,i+1)
plt.imshow(-c[i,:].reshape((28,28)), interpolation='nearest', cmap='gray')
plt.axis('off')
c,J,closest = kmeans(X, k=20, n=20)
plt.plot(J)
plt.figure()
for i in range(20):
plt.subplot(4,5,i+1)
plt.imshow(-c[i,:].reshape((28,28)), interpolation='nearest', cmap='gray')
plt.axis('off')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Project Euler
Step2: Now write a set of assert tests for your number_to_words function that verifies that it is working as expected.
Step4: Now define a count_letters(n) that returns the number of letters used to write out the words for all of the numbers 1 to n inclusive.
Step5: Now write a set of assert tests for your count_letters function that verifies that it is working as expected.
Step6: Finally use your count_letters function to solve the original question.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
def number_to_words(n):
"""Given a number n between 1-1000 inclusive return a list of words for the number."""
# YOUR CODE HERE
#raise NotImplementedError()
ones=['one','two','three','four','five','six','seven','eight','nine','ten']
teens=['eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen']
tens=['twenty','thirty','forty','fifty','sixty','seventy','eighty','ninety']
#obvious what the if statements do, so not going to comment everything
if n<=10:
x=ones[n-1]
if 10<n<20:
x=teens[n-11]
if n!=10 and n<100 and n%10==0:
x=tens[int(n/10)-2]
if 20<n<30:
x=tens[0]+'-'+ones[n%20-1]
if 30<n<40:
x=tens[1]+'-'+ones[n%30-1]
if 40<n<50:
x=tens[2]+'-'+ones[n%40-1]
if 50<n<60:
x=tens[3]+'-'+ones[n%50-1]
if 60<n<70:
x=tens[4]+'-'+ones[n%60-1]
if 70<n<80:
x=tens[5]+'-'+ones[n%70-1]
if 80<n<90:
x=tens[6]+'-'+ones[n%80-1]
if 90<n<100:
x=tens[7]+'-'+ones[n%90-1]
if 100<=n<1000:
a = str(n)
b = ones[int(a[0])-1]+' hundred and'
if n%100==0:
x = ones[int(a[0])-1]+' hundred'
else:
if int(a[1::])<=10:
x=b+' '+ones[int(a[1::])-1]
if 10<int(a[1::])<20:
x=b+' '+teens[int(a[1::])-11]
if int(a[1::])!=10 and int(a[1::])<100 and int(a[1::])%10==0:
x=b+' '+tens[int(int(a[1::])/10)-2]
if 20<int(a[1::])<30:
x=b+' '+tens[0]+'-'+ones[int(a[1::])%20-1]
if 30<int(a[1::])<40:
x=b+' '+tens[1]+'-'+ones[int(a[1::])%30-1]
if 40<int(a[1::])<50:
x=b+' '+tens[2]+'-'+ones[int(a[1::])%40-1]
if 50<int(a[1::])<60:
x=b+' '+tens[3]+'-'+ones[int(a[1::])%50-1]
if 60<int(a[1::])<70:
x=b+' '+tens[4]+'-'+ones[int(a[1::])%60-1]
if 70<int(a[1::])<80:
x=b+' '+tens[5]+'-'+ones[int(a[1::])%70-1]
if 80<int(a[1::])<90:
x=b+' '+tens[6]+'-'+ones[int(a[1::])%80-1]
if 90<int(a[1::])<100:
x=b+' '+tens[7]+'-'+ones[int(a[1::])%90-1]
if n==1000:
x="one thousand"
return str(x)
#number_to_words(999)
# YOUR CODE HERE
#raise NotImplementedError()
assert number_to_words(5) == 'five'
assert number_to_words(100) == 'one hundred'
assert number_to_words(435) == 'four hundred and thirty-five'
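# Extra hedged spot checks, following the same British "and" convention used above.
assert number_to_words(115) == 'one hundred and fifteen'
assert number_to_words(342) == 'three hundred and forty-two'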
assert True # use this for grading the number_to_words tests.
def count_letters(n):
"""Count the number of letters used to write out the words for 1-n inclusive."""
# YOUR CODE HERE
#raise NotImplementedError()
l = []
lit = np.ones([len(range(n))])
#puts all the written out numbers in a list
for i in range(n):
l.append(number_to_words(i+1))
#removes hyphens
y = [k.replace('-','') for k in l]
#removes spaces
z=[m.replace(' ','') for m in y]
#puts the length of each word w/out spaces/hyphens in array
for j in range(n):
lit[j]=len(z[j])
#returns sum of all lengths in array lit
return sum(lit)
# YOUR CODE HERE
#raise NotImplementedError()
assert count_letters(1) == 3
assert count_letters(2) == 6
assert count_letters(5) == 19
assert True # use this for grading the count_letters tests.
# YOUR CODE HERE
#raise NotImplementedError()
count_letters(1000)
assert True # use this for grading the answer to the original question.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: [pooling.GlobalAveragePooling3D.1] input 3x6x6x3, data_format='channels_first'
Step2: [pooling.GlobalAveragePooling3D.2] input 5x3x2x1, data_format='channels_last'
Step3: export for Keras.js tests
|
<ASSISTANT_TASK:>
Python Code:
# Imports and helpers assumed from earlier cells of the source notebook; the
# format_decimal helper and DATA container below are reasonable
# reconstructions (an assumption), not verbatim from the original.
import json
import numpy as np
from collections import OrderedDict
from keras.layers import Input
from keras.layers.pooling import GlobalAveragePooling3D
from keras.models import Model
def format_decimal(arr, places=6):
    return [round(x * 10**places) / 10**places for x in arr]
DATA = OrderedDict()
data_in_shape = (6, 6, 3, 4)
L = GlobalAveragePooling3D(data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(270)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.GlobalAveragePooling3D.0'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (3, 6, 6, 3)
L = GlobalAveragePooling3D(data_format='channels_first')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(271)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.GlobalAveragePooling3D.1'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (5, 3, 2, 1)
L = GlobalAveragePooling3D(data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(272)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.GlobalAveragePooling3D.2'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
print(json.dumps(DATA))
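# Hedged convenience sketch: also persist the fixtures to disk
# (the filename here is an assumption, not taken from the original notebook).
with open('pooling.GlobalAveragePooling3D.json', 'w') as f:
    json.dump(DATA, f)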
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Optimization Analysis
Step2: Load Data
Step3: Plot
Step4: Hardware Grid
Step5: SK Model
Step6: 3 Regular MaxCut
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import recirq
except ImportError:
!pip install -q git+https://github.com/quantumlib/ReCirq sympy~=1.6
from datetime import datetime
import recirq
import cirq
import numpy as np
import pandas as pd
from recirq.qaoa.experiments.optimization_tasks import (
DEFAULT_BASE_DIR,
DEFAULT_PROBLEM_GENERATION_BASE_DIR)
records = []
for record in recirq.iterload_records(dataset_id="2020-03-tutorial", base_dir=DEFAULT_BASE_DIR):
task = record['task']
result = recirq.load(task, DEFAULT_BASE_DIR)
pgen_task = task.generation_task
problem = recirq.load(pgen_task, base_dir=DEFAULT_PROBLEM_GENERATION_BASE_DIR)['problem']
record['problem'] = problem.graph
record['problem_type'] = problem.__class__.__name__
recirq.flatten_dataclass_into_record(record, 'task')
records.append(record)
df = pd.DataFrame(records)
df['timestamp'] = pd.to_datetime(df['timestamp'])
df.head()
%matplotlib inline
from matplotlib import pyplot as plt
import seaborn as sns
sns.set_style('ticks')
plt.rc('axes', labelsize=16, titlesize=16)
plt.rc('xtick', labelsize=14)
plt.rc('ytick', labelsize=14)
plt.rc('legend', fontsize=14, title_fontsize=16)
# Load landscape data
from recirq.qaoa.experiments.p1_landscape_tasks import \
DEFAULT_BASE_DIR, DEFAULT_PROBLEM_GENERATION_BASE_DIR, DEFAULT_PRECOMPUTATION_BASE_DIR, \
ReadoutCalibrationTask
records = []
ro_records = []
for record in recirq.iterload_records(dataset_id="2020-03-tutorial", base_dir=DEFAULT_BASE_DIR):
record['timestamp'] = datetime.fromisoformat(record['timestamp'])
dc_task = record['task']
if isinstance(dc_task, ReadoutCalibrationTask):
ro_records.append(record)
continue
pgen_task = dc_task.generation_task
problem = recirq.load(pgen_task, base_dir=DEFAULT_PROBLEM_GENERATION_BASE_DIR)['problem']
record['problem'] = problem.graph
record['problem_type'] = problem.__class__.__name__
record['bitstrings'] = record['bitstrings'].bits
recirq.flatten_dataclass_into_record(record, 'task')
recirq.flatten_dataclass_into_record(record, 'generation_task')
records.append(record)
# Associate each data collection task with its nearest readout calibration
for record in sorted(records, key=lambda x: x['timestamp']):
record['ro'] = min(ro_records, key=lambda x: abs((x['timestamp']-record['timestamp']).total_seconds()))
df_raw = pd.DataFrame(records)
df_raw.head()
from recirq.qaoa.simulation import hamiltonian_objectives
def compute_energies(row):
permutation = []
qubit_map = {}
final_qubit_index = {q: i for i, q in enumerate(row['final_qubits'])}
for i, q in enumerate(row['qubits']):
fi = final_qubit_index[q]
permutation.append(fi)
qubit_map[i] = q
return hamiltonian_objectives(row['bitstrings'],
row['problem'],
permutation,
row['ro']['calibration'],
qubit_map)
# Start cleaning up the raw data
landscape_df = df_raw.copy()
landscape_df = landscape_df.drop(['line_placement_strategy',
'generation_task.dataset_id',
'generation_task.device_name'], axis=1)
# Compute energies
landscape_df['energies'] = landscape_df.apply(compute_energies, axis=1)
landscape_df = landscape_df.drop(['bitstrings', 'problem', 'ro', 'qubits', 'final_qubits'], axis=1)
landscape_df['energy'] = landscape_df.apply(lambda row: np.mean(row['energies']), axis=1)
# We won't do anything with raw energies right now
landscape_df = landscape_df.drop('energies', axis=1)
# Do timing somewhere else
landscape_df = landscape_df.drop([col for col in landscape_df.columns if col.endswith('_time')], axis=1)
import scipy.interpolate
from recirq.qaoa.simulation import lowest_and_highest_energy
def get_problem_graph(problem_type,
n=None,
instance_i=0):
if n is None:
if problem_type == 'HardwareGridProblem':
n = 4
elif problem_type == 'SKProblem':
n = 3
elif problem_type == 'ThreeRegularProblem':
n = 4
else:
raise ValueError(repr(problem_type))
r = df_raw[
(df_raw['problem_type']==problem_type)&
(df_raw['n_qubits']==n)&
(df_raw['instance_i']==instance_i)
]['problem']
return r.iloc[0]
def plot_optimization_path_in_landscape(problem_type, res=200, method='nearest', cmap='PuOr'):
optimization_data = df[df['problem_type'] == problem_type]
landscape_data = landscape_df[landscape_df['problem_type'] == problem_type]
xx, yy = np.meshgrid(np.linspace(0, np.pi/2, res), np.linspace(-np.pi/4, np.pi/4, res))
x_iters = optimization_data['x_iters'].values[0]
min_c, max_c = lowest_and_highest_energy(get_problem_graph(problem_type))
zz = scipy.interpolate.griddata(
points=landscape_data[['gamma', 'beta']].values,
values=landscape_data['energy'].values / min_c,
xi=(xx, yy),
method=method,
)
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
norm = plt.Normalize(max_c/min_c, min_c/min_c)
cmap = 'RdBu'
extent=(0, 4, -2, 2)
g = ax.imshow(zz, extent=extent, origin='lower', cmap=cmap, norm=norm, interpolation='none')
xs, ys = zip(*x_iters)
xs = np.array(xs) / (np.pi / 8)
ys = np.array(ys) / (np.pi / 8)
ax.plot(xs, ys, 'r-')
ax.plot(xs[0], ys[0], 'rs')
ax.plot(xs[1:-1], ys[1:-1], 'r.')
ax.plot(xs[-1], ys[-1], 'ro')
x, y = optimization_data['optimal_angles'].values[0]
x /= (np.pi / 8)
y /= (np.pi / 8)
ax.plot(x, y, 'r*')
ax.set_xlabel(r'$\gamma\ /\ (\pi/8)$')
ax.set_ylabel(r'$\beta\ /\ (\pi/8)$')
ax.set_title('Optimization path in landscape')
fig.colorbar(g, ax=ax, shrink=0.8)
def plot_function_values(problem_type):
data = df[df['problem_type'] == problem_type]
function_values = data['func_vals'].values[0]
min_c, _ = lowest_and_highest_energy(get_problem_graph(problem_type))
function_values = np.array(function_values) / min_c
x = range(len(function_values))
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
ax.plot(x, function_values, 'o--')
ax.set_xlabel('Optimization iteration')
ax.set_ylabel(r'$E / E_{min}$')
ax.set_title('Optimization function values')
plot_optimization_path_in_landscape('HardwareGridProblem')
plot_function_values('HardwareGridProblem')
plot_optimization_path_in_landscape('SKProblem')
plot_function_values('SKProblem')
plot_optimization_path_in_landscape('ThreeRegularProblem')
plot_function_values('ThreeRegularProblem')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Data and Labels
Step2: Data Visualization
Step3: "one-hot" format to represent labels
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
from tensorflow.examples.tutorials.mnist import input_data
%matplotlib inline
epochs = 1000
learning_rate = 0.5
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print (mnist.train.images.shape)
print (mnist.train.labels.shape)
print (mnist.test.images.shape)
print (mnist.test.labels.shape)
pcolor(mnist.train.images[10000].reshape(28,28), cmap=plt.cm.gray_r)
print (mnist.train.images[10000].reshape(28,28)[20:25,5:10])
print ("Label")
print (mnist.train.labels[10000])
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Define loss and optimizer
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# TensorFlow initialization
init = tf.initialize_all_variables()
errors = []
with tf.Session() as sess:
sess.run(init)
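# Train on mini-batches of 100 images and record the test-set error after each step.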
for i in range(epochs):
batch_xs, batch_ys = mnist.train.next_batch(100)
_, cross_entropy_value, y_value = sess.run([optimizer, cross_entropy, y], feed_dict={x: batch_xs, y_: batch_ys})
accuracy_value = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
errors.append(1-accuracy_value)
print (errors[-1])
plt.plot([np.mean(errors[i-50:i]) for i in range(len(errors))])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Examples
Step2: And compute the distribution of birth weight for first babies and others.
Step3: We can plot the PMFs on the same scale, but it is hard to see if there is a difference.
Step4: PercentileRank computes the fraction of scores less than or equal to your_score.
Step5: If this is the list of scores.
Step6: And you got the 88, your percentile rank is 80.
Step7: Percentile takes a percentile rank and computes the corresponding percentile.
Step8: The median is the 50th percentile, which is 77.
Step9: Here's a more efficient way to compute percentiles.
Step10: Let's hope we get the same answer.
Step11: The Cumulative Distribution Function (CDF) is almost the same as PercentileRank. The only difference is that the result is 0-1 instead of 0-100.
Step12: In this list
Step13: We can evaluate the CDF for various values
Step14: Here's an example using real data, the distribution of pregnancy length for live births.
Step15: Cdf provides Prob, which evaluates the CDF; that is, it computes the fraction of values less than or equal to the given value. For example, 94% of pregnancy lengths are less than or equal to 41.
Step16: Value evaluates the inverse CDF; given a fraction, it computes the corresponding value. For example, the median is the value that corresponds to 0.5.
Step17: In general, CDFs are a good way to visualize distributions. They are not as noisy as PMFs, and if you plot several CDFs on the same axes, any differences between them are apparent.
Step18: In this example, we can see that first babies are slightly, but consistently, lighter than others.
Step19: Again, the median is the 50th percentile.
Step20: The interquartile range is the interval from the 25th to 75th percentile.
Step21: We can use the CDF to look up the percentile rank of a particular value. For example, my second daughter was 10.2 pounds at birth, which is near the 99th percentile.
Step22: If we draw a random sample from the observed weights and map each weight to its percentile rank.
Step23: The resulting list of ranks should be approximately uniform from 0-1.
Step24: That observation is the basis of Cdf.Sample, which generates a random sample from a Cdf. Here's an example.
Step25: This confirms that the random sample has the same distribution as the original data.
Step26: Exercise
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
%matplotlib inline
import numpy as np
import nsfg
import first
import thinkstats2
import thinkplot
live, firsts, others = first.MakeFrames()
first_wgt = firsts.totalwgt_lb
first_wgt_dropna = first_wgt.dropna()
print('Firsts', len(first_wgt), len(first_wgt_dropna))
other_wgt = others.totalwgt_lb
other_wgt_dropna = other_wgt.dropna()
print('Others', len(other_wgt), len(other_wgt_dropna))
first_pmf = thinkstats2.Pmf(first_wgt_dropna, label='first')
other_pmf = thinkstats2.Pmf(other_wgt_dropna, label='other')
width = 0.4 / 16
# plot PMFs of birth weights for first babies and others
thinkplot.PrePlot(2)
thinkplot.Hist(first_pmf, align='right', width=width)
thinkplot.Hist(other_pmf, align='left', width=width)
thinkplot.Config(xlabel='Weight (pounds)', ylabel='PMF')
def PercentileRank(scores, your_score):
count = 0
for score in scores:
if score <= your_score:
count += 1
percentile_rank = 100.0 * count / len(scores)
return percentile_rank
t = [55, 66, 77, 88, 99]
PercentileRank(t, 88)
def Percentile(scores, percentile_rank):
scores.sort()
for score in scores:
if PercentileRank(scores, score) >= percentile_rank:
return score
Percentile(t, 50)
def Percentile2(scores, percentile_rank):
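# Map the percentile rank directly to an index into the sorted scores.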
scores.sort()
index = percentile_rank * (len(scores)-1) // 100
return scores[index]
Percentile2(t, 50)
def EvalCdf(sample, x):
count = 0.0
for value in sample:
if value <= x:
count += 1
prob = count / len(sample)
return prob
t = [1, 2, 2, 3, 5]
EvalCdf(t, 0), EvalCdf(t, 1), EvalCdf(t, 2), EvalCdf(t, 3), EvalCdf(t, 4), EvalCdf(t, 5)
cdf = thinkstats2.Cdf(live.prglngth, label='prglngth')
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel='Pregnancy length (weeks)', ylabel='CDF', loc='upper left')
cdf.Prob(41)
cdf.Value(0.5)
first_cdf = thinkstats2.Cdf(firsts.totalwgt_lb, label='first')
other_cdf = thinkstats2.Cdf(others.totalwgt_lb, label='other')
thinkplot.PrePlot(2)
thinkplot.Cdfs([first_cdf, other_cdf])
thinkplot.Config(xlabel='Weight (pounds)', ylabel='CDF')
weights = live.totalwgt_lb
live_cdf = thinkstats2.Cdf(weights, label='live')
median = live_cdf.Percentile(50)
median
iqr = (live_cdf.Percentile(25), live_cdf.Percentile(75))
iqr
live_cdf.PercentileRank(10.2)
sample = np.random.choice(weights, 100, replace=True)
ranks = [live_cdf.PercentileRank(x) for x in sample]
rank_cdf = thinkstats2.Cdf(ranks)
thinkplot.Cdf(rank_cdf)
thinkplot.Config(xlabel='Percentile rank', ylabel='CDF')
resample = live_cdf.Sample(1000)
thinkplot.Cdf(live_cdf)
thinkplot.Cdf(thinkstats2.Cdf(resample, label='resample'))
thinkplot.Config(xlabel='Birth weight (pounds)', ylabel='CDF')
# Solution
cdf.PercentileRank(8.5)
# Solution
other_cdf.PercentileRank(8.5)
# Solution
t = np.random.random(1000)
# Solution
pmf = thinkstats2.Pmf(t)
thinkplot.Pmf(pmf, linewidth=0.1)
thinkplot.Config(xlabel='Random variate', ylabel='PMF')
# Solution
cdf = thinkstats2.Cdf(t)
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel='Random variate', ylabel='CDF')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Kafka Producer Test Script
Step2: The "getRandomPitch" method will generate a mock "reading" between a min and max range. The position can be arbitrarily defined or if you want to be very accurate, the debug output of the machine learning model can be used here.
Step3: The "sendSensorReadings" method creates mock "actual" readings. A random reading is generated and a JSON packet is created and sent to Kafka using the "LumbarSensorReadings" topic.
Step4: The "sendSensorTrainingReadings" method creates mock Training readings that are subesquently used in the machine learning model generation. A random reading is generated with a known posture and a JSON packet is created and sent to Kafka using the "LumbarSensorTrainingReadings" topic.
Step5: The main method calls the methods with a defined number of iterations.
|
<ASSISTANT_TASK:>
Python Code:
import time
from kafka import KafkaProducer
import json
import random
import csv
import uuid
import datetime
# Usage: bin/spark-submit ~/spark/kafkaProducerTest.py
producer = KafkaProducer(bootstrap_servers='localhost:9092')
pitch = 0
position = 0
def getRandomPitch(min,max):
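# Posture class derived from pitch: 2 if pitch < -25, 1 if -25 <= pitch < -15, 0 otherwise (upright).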
pitch = random.uniform(min,max)
if pitch < -25:
position=2
elif pitch < -15:
position=1
else:
position=0
return pitch, position
def sendSensorReadings(iterations):
for i in range(iterations):
readingID = str(uuid.uuid4())
pitch, position = getRandomPitch(-50,25)
if pitch <= -15:
if pitch <= -25:
position = 2.0
elif pitch > -25:
if pitch <= -15:
position = 1.0
packet = {
"readingID":readingID,
"deviceID":"5d681c54e66ff4a5654e55c6d5a5b54",
"readingTime":datetime.datetime.now().isoformat(),
"metricTypeID":6,
"uomID":4,
"actual":{"y":-30,"p":pitch,"r":120},
"setPoints":{"y":25,"p":45,"r":100},
"prevAvg":{"y":15,"p":40,"r":88}
}
print(packet)
producer.send('LumbarSensorReadings', json.dumps(packet))
def sendSensorTrainingReadings(iterations):
for i in range(iterations):
pitch, position = getRandomPitch(-50,25)
if pitch <= -15:
if pitch <= -25:
position = 2.0
elif pitch > -25:
if pitch <= -15:
position = 1.0
packet = {
"deviceID":"5d681c54e66ff4a5654e55c6d5a5b54",
"positionID":position,
"readingTime":"2016-07-25T15:45:07.12",
"metricTypeID":6,
"uomID":4,
"actual":{"y":18,"p":pitch,"r":120},
"setPoints":{"y":25,"p":45,"r":100},
"prevAvg":{"y":15,"p":40,"r":88}
}
producer.send('LumbarSensorTrainingReadings', json.dumps(packet))
def main():
sendSensorReadings(50)
sendSensorTrainingReadings(50)
if __name__ == "__main__":
main()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question 1
Step2: Question 2
Step3: Question 3
Step4: Question 4
|
<ASSISTANT_TASK:>
Python Code:
from pandas import DataFrame, Series
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
weather = pd.read_table('daily_weather.tsv')
usage = pd.read_table('usage_2012.tsv')
stations = pd.read_table('stations.tsv')
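# The raw season labels appear to be shifted by one season; remap each label to the intended one.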
newseasons = {'Summer': 'Spring', 'Spring': 'Winter', 'Fall': 'Summer', 'Winter': 'Fall'}
weather['season_desc'] = weather['season_desc'].map(newseasons)
weather['Day'] = pd.DatetimeIndex(weather.date).date
weather['Month'] = pd.DatetimeIndex(weather.date).month
weather['temp'].plot()
# weather.plot(kind='line', y='temp', x='Day')
plt.show()
weather[['Month', 'humidity', 'temp']].groupby('Month').aggregate(np.mean).plot(kind='bar')
plt.show()
w = weather[['season_desc', 'temp', 'total_riders']]
w_fal = w.loc[w['season_desc'] == 'Fall']
w_win = w.loc[w['season_desc'] == 'Winter']
w_spr = w.loc[w['season_desc'] == 'Spring']
w_sum = w.loc[w['season_desc'] == 'Summer']
plt.scatter(w_fal['temp'], w_fal['total_riders'], c='y', label='Fall', s=100, alpha=.5)
plt.scatter(w_win['temp'], w_win['total_riders'], c='r', label='Winter', s=100, alpha=.5)
plt.scatter(w_spr['temp'], w_spr['total_riders'], c='b', label='Sprint', s=100, alpha=.5)
plt.scatter(w_sum['temp'], w_sum['total_riders'], c='g', label='Summer', s=100, alpha=.5)
plt.legend(loc='lower right')
plt.xlabel('Temperature')
plt.ylabel('Total Riders')
plt.show()
w = weather[['season_desc', 'windspeed', 'total_riders']]
w_fal = w.loc[w['season_desc'] == 'Fall']
w_win = w.loc[w['season_desc'] == 'Winter']
w_spr = w.loc[w['season_desc'] == 'Spring']
w_sum = w.loc[w['season_desc'] == 'Summer']
plt.scatter(w_fal['windspeed'], w_fal['total_riders'], c='y', label='Fall', s=100, alpha=.5)
plt.scatter(w_win['windspeed'], w_win['total_riders'], c='r', label='Winter', s=100, alpha=.5)
plt.scatter(w_spr['windspeed'], w_spr['total_riders'], c='b', label='Sprint', s=100, alpha=.5)
plt.scatter(w_sum['windspeed'], w_sum['total_riders'], c='g', label='Summer', s=100, alpha=.5)
plt.legend(loc='lower right')
plt.xlabel('Wind Speed')
plt.ylabel('Total Riders')
plt.show()
s = stations[['station','lat','long']]
u = pd.concat([usage['station_start']], axis=1, keys=['station'])
counts = u['station'].value_counts()
c = DataFrame(counts.index, columns=['station'])
c['counts'] = counts.values
m = pd.merge(s, c, on='station')
plt.scatter(m['long'], m['lat'], c='b', label='Location', s=(m['counts'] * .05), alpha=.1)
plt.legend(loc='lower right')
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Integrated gradients
Step2: Download a pretrained image classifier from TF-Hub
Step3: From the module page, you need to keep in mind the following about Inception V1
Step4: Load and preprocess images with tf.image
Step5: Classify images
Step8: Calculate Integrated Gradients
Step9: [Figure: left: gradients saturate over F(x); right: IG intuition — accumulate gradients along the path from baseline to input]
Step10: Unpack formulas into code
Step11: Let's use the above function to generate interpolated images along a linear path at alpha intervals between a black baseline image and the example "Fireboat" image.
Step12: Let's visualize the interpolated images. Note
Step13: Compute gradients
Step14: Let's compute the gradients for each image along the interpolation path with respect to the correct output. Recall that your model returns a (1, 1001) shaped Tensor with logits that you convert to predicted probabilities for each class. You need to pass the correct ImageNet target class index to the compute_gradients function for your image.
Step15: Note the output shape of (n_interpolated_images, img_height, img_width, RGB), which gives us the gradient for every pixel of every image along the interpolation path. You can think of these gradients as measuring the change in your model's predictions for each small step in the feature space.
Step16: Visualizing gradient saturation
Step17: [Figure: left: target-class predicted probability over alpha; right: average pixel gradients (normalized) over alpha]
Step18: The integral_approximation function takes the gradients of the predicted probability of the target class with respect to the interpolated images between the baseline and the original image.
Step19: You can confirm averaging across the gradients of m interpolated images returns an integrated gradients tensor with the same shape as the original "Giant Panda" image.
Step20: Putting it all together
Step21: Again, you can check that the IG feature attributions have the same shape as the input "Fireboat" image.
Step22: The paper suggests the number of steps to range between 20 to 300 depending upon the example (although in practice this can be higher in the 1,000s to accurately approximate the integral). You can find additional code to check for the appropriate number of steps in the "Next steps" resources at the end of this tutorial.
Step23: Looking at the attributions on the "Fireboat" image, you can see the model identifies the water cannons and spouts as contributing to its correct prediction.
Step24: On the "Giant Panda" image, the attributions highlight the texture, nose, and the fur of the Panda's face.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
model = tf.keras.Sequential([
hub.KerasLayer(
name='inception_v1',
handle='https://tfhub.dev/google/imagenet/inception_v1/classification/4',
trainable=False),
])
model.build([None, 224, 224, 3])
model.summary()
def load_imagenet_labels(file_path):
labels_file = tf.keras.utils.get_file('ImageNetLabels.txt', file_path)
with open(labels_file) as reader:
f = reader.read()
labels = f.splitlines()
return np.array(labels)
imagenet_labels = load_imagenet_labels('https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
def read_image(file_name):
image = tf.io.read_file(file_name)
image = tf.io.decode_jpeg(image, channels=3)
image = tf.image.convert_image_dtype(image, tf.float32)
image = tf.image.resize_with_pad(image, target_height=224, target_width=224)
return image
img_url = {
'Fireboat': 'http://storage.googleapis.com/download.tensorflow.org/example_images/San_Francisco_fireboat_showing_off.jpg',
'Giant Panda': 'http://storage.googleapis.com/download.tensorflow.org/example_images/Giant_Panda_2.jpeg',
}
img_paths = {name: tf.keras.utils.get_file(name, url) for (name, url) in img_url.items()}
img_name_tensors = {name: read_image(img_path) for (name, img_path) in img_paths.items()}
plt.figure(figsize=(8, 8))
for n, (name, img_tensors) in enumerate(img_name_tensors.items()):
ax = plt.subplot(1, 2, n+1)
ax.imshow(img_tensors)
ax.set_title(name)
ax.axis('off')
plt.tight_layout()
def top_k_predictions(img, k=3):
image_batch = tf.expand_dims(img, 0)
predictions = model(image_batch)
probs = tf.nn.softmax(predictions, axis=-1)
top_probs, top_idxs = tf.math.top_k(input=probs, k=k)
top_labels = imagenet_labels[tuple(top_idxs)]
return top_labels, top_probs[0]
for (name, img_tensor) in img_name_tensors.items():
plt.imshow(img_tensor)
plt.title(name, fontweight='bold')
plt.axis('off')
plt.show()
pred_label, pred_prob = top_k_predictions(img_tensor)
for label, prob in zip(pred_label, pred_prob):
print(f'{label}: {prob:0.1%}')
def f(x):
"""A simplified model function."""
return tf.where(x < 0.8, x, 0.8)
def interpolated_path(x):
"""A straight line path."""
return tf.zeros_like(x)
x = tf.linspace(start=0.0, stop=1.0, num=6)
y = f(x)
#@title
fig = plt.figure(figsize=(12, 5))
ax0 = fig.add_subplot(121)
ax0.plot(x, f(x), marker='o')
ax0.set_title('Gradients saturate over F(x)', fontweight='bold')
ax0.text(0.2, 0.5, 'Gradients > 0 = \n x is important')
ax0.text(0.7, 0.85, 'Gradients = 0 \n x not important')
ax0.set_yticks(tf.range(0, 1.5, 0.5))
ax0.set_xticks(tf.range(0, 1.5, 0.5))
ax0.set_ylabel('F(x) - model true class predicted probability')
ax0.set_xlabel('x - (pixel value)')
ax1 = fig.add_subplot(122)
ax1.plot(x, f(x), marker='o')
ax1.plot(x, interpolated_path(x), marker='>')
ax1.set_title('IG intuition', fontweight='bold')
ax1.text(0.25, 0.1, 'Accumulate gradients along path')
ax1.set_ylabel('F(x) - model true class predicted probability')
ax1.set_xlabel('x - (pixel value)')
ax1.set_yticks(tf.range(0, 1.5, 0.5))
ax1.set_xticks(tf.range(0, 1.5, 0.5))
ax1.annotate('Baseline', xy=(0.0, 0.0), xytext=(0.0, 0.2),
arrowprops=dict(facecolor='black', shrink=0.1))
ax1.annotate('Input', xy=(1.0, 0.0), xytext=(0.95, 0.2),
arrowprops=dict(facecolor='black', shrink=0.1))
plt.show();
baseline = tf.zeros(shape=(224,224,3))
plt.imshow(baseline)
plt.title("Baseline")
plt.axis('off')
plt.show()
m_steps=50
alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps+1) # Generate m_steps intervals for integral_approximation() below.
def interpolate_images(baseline,
image,
alphas):
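# Add trailing dimensions to alphas so each alpha broadcasts over the (height, width, channels) of the image.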
alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis]
baseline_x = tf.expand_dims(baseline, axis=0)
input_x = tf.expand_dims(image, axis=0)
delta = input_x - baseline_x
images = baseline_x + alphas_x * delta
return images
interpolated_images = interpolate_images(
baseline=baseline,
image=img_name_tensors['Fireboat'],
alphas=alphas)
fig = plt.figure(figsize=(20, 20))
i = 0
for alpha, image in zip(alphas[0::10], interpolated_images[0::10]):
i += 1
plt.subplot(1, len(alphas[0::10]), i)
plt.title(f'alpha: {alpha:.1f}')
plt.imshow(image)
plt.axis('off')
plt.tight_layout();
def compute_gradients(images, target_class_idx):
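# Watch the interpolated images so we can differentiate the target-class probability w.r.t. the pixels.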
with tf.GradientTape() as tape:
tape.watch(images)
logits = model(images)
probs = tf.nn.softmax(logits, axis=-1)[:, target_class_idx]
return tape.gradient(probs, images)
path_gradients = compute_gradients(
images=interpolated_images,
target_class_idx=555)
print(path_gradients.shape)
pred = model(interpolated_images)
pred_proba = tf.nn.softmax(pred, axis=-1)[:, 555]
#@title
plt.figure(figsize=(10, 4))
ax1 = plt.subplot(1, 2, 1)
ax1.plot(alphas, pred_proba)
ax1.set_title('Target class predicted probability over alpha')
ax1.set_ylabel('model p(target class)')
ax1.set_xlabel('alpha')
ax1.set_ylim([0, 1])
ax2 = plt.subplot(1, 2, 2)
# Average across interpolation steps
average_grads = tf.reduce_mean(path_gradients, axis=[1, 2, 3])
# Normalize gradients to 0 to 1 scale. E.g. (x - min(x))/(max(x)-min(x))
average_grads_norm = (average_grads-tf.math.reduce_min(average_grads))/(tf.math.reduce_max(average_grads)-tf.reduce_min(average_grads))
ax2.plot(alphas, average_grads_norm)
ax2.set_title('Average pixel gradients (normalized) over alpha')
ax2.set_ylabel('Average pixel gradients')
ax2.set_xlabel('alpha')
ax2.set_ylim([0, 1]);
def integral_approximation(gradients):
# riemann_trapezoidal
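# Average each pair of consecutive gradients (trapezoidal rule), then take the mean over all steps.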
grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)
integrated_gradients = tf.math.reduce_mean(grads, axis=0)
return integrated_gradients
ig = integral_approximation(
gradients=path_gradients)
print(ig.shape)
def integrated_gradients(baseline,
image,
target_class_idx,
m_steps=50,
batch_size=32):
# Generate alphas.
alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps+1)
# Collect gradients.
gradient_batches = []
# Iterate alphas range and batch computation for speed, memory efficiency, and scaling to larger m_steps.
for alpha in tf.range(0, len(alphas), batch_size):
from_ = alpha
to = tf.minimum(from_ + batch_size, len(alphas))
alpha_batch = alphas[from_:to]
gradient_batch = one_batch(baseline, image, alpha_batch, target_class_idx)
gradient_batches.append(gradient_batch)
# Concatenate path gradients together row-wise into single tensor.
total_gradients = tf.concat(gradient_batches, axis=0)
# Integral approximation through averaging gradients.
avg_gradients = integral_approximation(gradients=total_gradients)
# Scale integrated gradients with respect to input.
integrated_gradients = (image - baseline) * avg_gradients
return integrated_gradients
@tf.function
def one_batch(baseline, image, alpha_batch, target_class_idx):
# Generate interpolated inputs between baseline and input.
interpolated_path_input_batch = interpolate_images(baseline=baseline,
image=image,
alphas=alpha_batch)
# Compute gradients between model outputs and interpolated inputs.
gradient_batch = compute_gradients(images=interpolated_path_input_batch,
target_class_idx=target_class_idx)
return gradient_batch
ig_attributions = integrated_gradients(baseline=baseline,
image=img_name_tensors['Fireboat'],
target_class_idx=555,
m_steps=240)
print(ig_attributions.shape)
#@title
def plot_img_attributions(baseline,
image,
target_class_idx,
m_steps=50,
cmap=None,
overlay_alpha=0.4):
attributions = integrated_gradients(baseline=baseline,
image=image,
target_class_idx=target_class_idx,
m_steps=m_steps)
# Sum of the attributions across color channels for visualization.
# The attribution mask shape is a grayscale image with height and width
# equal to the original image.
attribution_mask = tf.reduce_sum(tf.math.abs(attributions), axis=-1)
fig, axs = plt.subplots(nrows=2, ncols=2, squeeze=False, figsize=(8, 8))
axs[0, 0].set_title('Baseline image')
axs[0, 0].imshow(baseline)
axs[0, 0].axis('off')
axs[0, 1].set_title('Original image')
axs[0, 1].imshow(image)
axs[0, 1].axis('off')
axs[1, 0].set_title('Attribution mask')
axs[1, 0].imshow(attribution_mask, cmap=cmap)
axs[1, 0].axis('off')
axs[1, 1].set_title('Overlay')
axs[1, 1].imshow(attribution_mask, cmap=cmap)
axs[1, 1].imshow(image, alpha=overlay_alpha)
axs[1, 1].axis('off')
plt.tight_layout()
return fig
_ = plot_img_attributions(image=img_name_tensors['Fireboat'],
baseline=baseline,
target_class_idx=555,
m_steps=240,
cmap=plt.cm.inferno,
overlay_alpha=0.4)
_ = plot_img_attributions(image=img_name_tensors['Giant Panda'],
baseline=baseline,
target_class_idx=389,
m_steps=55,
cmap=plt.cm.viridis,
overlay_alpha=0.5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nims-kma', 'sandbox-2', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step7: Code from previous chapters
Step8: Contact number
Step9: The following loop shows how we can loop through the columns and rows of the SweepFrame. With 11 rows and 4 columns, there are 44 elements.
Step11: Now we can wrap that loop in a function and plot the results. For each element of the SweepFrame, we have beta, gamma, and frac_infected, and we plot beta/gamma on the x-axis and frac_infected on the y-axis.
Step12: Here's what it looks like
Step13: It turns out that the ratio beta/gamma, called the "contact number" is sufficient to predict the total number of infections; we don't have to know beta and gamma separately.
Step14: total_infected is the change in $s$ from the beginning to the end.
Step15: Now we can plot the analytic results and compare them to the simulations.
Step16: The agreement is generally good, except for values of c less than 1.
Step17: Exercise
|
<ASSISTANT_TASK:>
Python Code:
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
def make_system(beta, gamma):
"""Make a system object for the SIR model.
beta: contact rate in days
gamma: recovery rate in days
returns: System object
"""
init = State(S=89, I=1, R=0)
init /= np.sum(init)
t0 = 0
t_end = 7 * 14
return System(init=init, t0=t0, t_end=t_end,
beta=beta, gamma=gamma)
def update_func(state, t, system):
"""Update the SIR model.
state: State (s, i, r)
t: time
system: System object
returns: State (sir)
"""
s, i, r = state
infected = system.beta * i * s
recovered = system.gamma * i
s -= infected
i += infected - recovered
r += recovered
return State(S=s, I=i, R=r)
def run_simulation(system, update_func):
"""Runs a simulation of the system.
system: System object
update_func: function that updates state
returns: TimeFrame
"""
init, t0, t_end = system.init, system.t0, system.t_end
frame = TimeFrame(columns=init.index)
frame.row[t0] = init
for t in linrange(t0, t_end):
frame.row[t+1] = update_func(frame.row[t], t, system)
return frame
def calc_total_infected(results):
"""Fraction of population infected during the simulation.
results: DataFrame with columns S, I, R
returns: fraction of population
"""
return get_first_value(results.S) - get_last_value(results.S)
def sweep_beta(beta_array, gamma):
"""Sweep a range of values for beta.
beta_array: array of beta values
gamma: recovery rate
returns: SweepSeries that maps from beta to total infected
"""
sweep = SweepSeries()
for beta in beta_array:
system = make_system(beta, gamma)
results = run_simulation(system, update_func)
sweep[system.beta] = calc_total_infected(results)
return sweep
def sweep_parameters(beta_array, gamma_array):
"""Sweep a range of values for beta and gamma.
beta_array: array of infection rates
gamma_array: array of recovery rates
returns: SweepFrame with one row for each beta and one column for each gamma
"""
frame = SweepFrame(columns=gamma_array)
for gamma in gamma_array:
frame[gamma] = sweep_beta(beta_array, gamma)
return frame
beta_array = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0 , 1.1]
gamma_array = [0.2, 0.4, 0.6, 0.8]
frame = sweep_parameters(beta_array, gamma_array)
frame.head()
frame.shape
for gamma in frame.columns:
column = frame[gamma]
for beta in column.index:
frac_infected = column[beta]
print(beta, gamma, frac_infected)
def plot_sweep_frame(frame):
"""Plot the values from a SweepFrame.
For each (beta, gamma), compute the contact number, beta/gamma.
frame: SweepFrame with one row per beta, one column per gamma
"""
for gamma in frame.columns:
column = frame[gamma]
for beta in column.index:
frac_infected = column[beta]
plot(beta/gamma, frac_infected, 'ro')
plot_sweep_frame(frame)
decorate(xlabel='Contact number (beta/gamma)',
ylabel='Fraction infected')
savefig('figs/chap14-fig01.pdf')
s_inf_array = linspace(0.0001, 0.9999, 101);
c_array = log(s_inf_array) / (s_inf_array - 1);
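# Analytic relation between the contact number c and the final susceptible fraction s_inf: c = log(s_inf) / (s_inf - 1).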
frac_infected = 1 - s_inf_array
frac_infected_series = Series(frac_infected, index=c_array);
plot_sweep_frame(frame)
plot(frac_infected_series, label='Analysis')
decorate(xlabel='Contact number (c)',
ylabel='Fraction infected')
savefig('figs/chap14-fig02.pdf')
# Solution
def plot_sweep_frame_difference(frame):
for gamma in frame.columns:
column = frame[gamma]
for beta in column.index:
frac_infected = column[beta]
plot(beta - gamma, frac_infected, 'ro')
# Solution
plot_sweep_frame_difference(frame)
decorate(xlabel='Excess infection rate (infections-recoveries per day)',
ylabel='Fraction infected',
legend=False)
# Solution
# The results don't fall on a line, which means that if we know the difference between
# `beta` and `gamma`, but not their ratio, that's not enough to predict the fraction infected.
# Solution
frac_infected_series
# Solution
# It looks like the fraction infected is 0.26 when the contact number is about 1.16
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Rossmann Deep Learning Modeling
Step2: Here, we will remove all records where the store had zero sales / was closed (feel free to experiment with not excluding the zero-sales records and see if it improves performance)
Step3: We print out the min/max timestamp of the training and test set to confirm that the two sets don't overlap.
Step4: Our training data is already sorted by date in decreasing order, hence we can create the validation set by checking how big our test set is and selecting the top-N observations to create a validation set that has a similar size to our test set. Here we say similar size and not exact size, because we make sure that all records from the same date fall under either the training or the validation set.
Step5: Here, we print out the dataframe where we'll be doing the train/validation cut to illustrate the point; this is technically not required for the rest of the pipeline. Notice that in the dataframe we've printed out, the last record's date, 2015-06-18, is different from the rest. This means that all records from 2015-06-19 onward will become our validation set.
Step6: Model Training
Step7: The fastai library will automatically fit a regression model when the dependent variable is a float, but not when it's an int. So in order to apply regression we need to tell fastai it is a float type, hence the argument label_cls=FloatList when creating the DataBunch that is required for training the model.
Step8: We can specify the capping for our prediction, ensuring that it won't be a negative value and it won't go beyond 1.2 times the maximum sales value we see in the dataset.
Step9: We'll now use all the information we have to create a fastai TabularModel. Here we've defined a fixed model with 2 hidden layers; we also try to avoid overfitting by applying regularization. This can be done by performing dropout, where we specify the dropout probability at each layer with the argument ps (more commonly seen) and the embedding (input) dropout with the argument emb_drop.
Step10: Printing out the model architecture, we see it first consists of a list of Embedding layers, one for each categorical variable. Recall the shape of an Embedding layer is (the number of distinct categories, the dimension of the embedding). When we specified our fastai learner, we didn't pass the emb_szs argument, which lets us set the embedding size for each categorical variable; hence the embedding sizes are determined algorithmically. E.g. our first embedding is for the Store feature: it shows there are 1116 distinct stores, and 81 is the corresponding embedding size that was chosen.
Step11: We can leverage the get_preds method to return the predictions and targets for a given dataset type. For the test set, we're only interested in the predictions.
|
<ASSISTANT_TASK:>
Python Code:
from jupyterthemes import get_themes
from jupyterthemes.stylefx import set_nb_theme
themes = get_themes()
set_nb_theme(themes[3])
# 1. magic for inline plot
# 2. magic to print version
# 3. magic so that the notebook will reload external python modules
# 4. magic to enable retina (high resolution) plots
# https://gist.github.com/minrk/3301035
%matplotlib inline
%load_ext watermark
%load_ext autoreload
%autoreload 2
%config InlineBackend.figure_format='retina'
import os
import torch
import numpy as np
import pandas as pd
%watermark -a 'Ethen' -d -t -v -p numpy,pandas,pyarrow,torch,fastai
data_dir = 'cleaned_data'
path_train = os.path.join(data_dir, 'train_clean.parquet')
path_test = os.path.join(data_dir, 'test_clean.parquet')
engine = 'pyarrow'
df_train = pd.read_parquet(path_train, engine)
df_test = pd.read_parquet(path_test, engine)
print('train dimension: ', df_train.shape)
print('test dimension: ', df_test.shape)
df_train.head()
cat_names = ['Store', 'DayOfWeek', 'Year', 'Month', 'Day', 'StateHoliday',
'CompetitionMonthsOpen', 'Promo2Weeks', 'StoreType', 'Assortment',
'PromoInterval', 'CompetitionOpenSinceYear', 'Promo2SinceYear',
'State', 'Week', 'Events']
cont_names = ['CompetitionDistance', 'Max_TemperatureC', 'Mean_TemperatureC',
'Min_TemperatureC', 'Max_Humidity', 'Mean_Humidity', 'Min_Humidity',
'Max_Wind_SpeedKm_h', 'Mean_Wind_SpeedKm_h', 'CloudCover', 'trend',
'trend_DE', 'Promo', 'SchoolHoliday', 'AfterSchoolHoliday',
'AfterStateHoliday', 'AfterPromo', 'BeforeSchoolHoliday',
'BeforeStateHoliday', 'BeforePromo']
dep_var = 'Sales'
df_train = df_train[df_train[dep_var] != 0].reset_index(drop=True)
df_test['Date'].min(), df_test['Date'].max()
# the minimum date of the test set is larger than the maximum date of the
# training set
df_train['Date'].min(), df_train['Date'].max()
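# The date found at row len(df_test) is the cutoff; the more recent rows above it become the validation set.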
mask = df_train['Date'] == df_train['Date'].iloc[len(df_test)]
cut = df_train.loc[mask, 'Date'].index.max()
# fastai expects a collection of int for specifying which index belongs
# to the validation set
valid_idx = range(cut)
valid_idx
df_train.loc[(cut - 2):(cut + 1)]
df_train = df_train[cat_names + cont_names + [dep_var]]
df_test = df_test[cat_names + cont_names + ['Id']]
print('train dimension: ', df_train.shape)
print('test dimension: ', df_test.shape)
df_train.head()
from fastai.tabular import DatasetType
from fastai.tabular import defaults, tabular_learner, exp_rmspe, TabularList
from fastai.tabular import Categorify, Normalize, FillMissing, FloatList
procs = [FillMissing, Categorify, Normalize]
# regression
data = (TabularList
.from_df(df_train, path=data_dir, cat_names=cat_names,
cont_names=cont_names, procs=procs)
.split_by_idx(valid_idx)
.label_from_df(cols=dep_var, label_cls=FloatList, log=True)
.add_test(TabularList.from_df(df_test, path=data_dir,
cat_names=cat_names, cont_names=cont_names))
.databunch())
max_log_y = np.log(np.max(df_train[dep_var]) * 1.2)
y_range = torch.tensor([0, max_log_y], device=defaults.device)
learn = tabular_learner(data, layers=[1000, 500], ps=[0.001, 0.01], emb_drop=0.04,
y_range=y_range, metrics=exp_rmspe)
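# Illustrative variant (not run here): the emb_szs argument would let us
# override the algorithmically chosen embedding size for any categorical
# variable, e.g. forcing a 50 dimensional embedding for Store:
# learn = tabular_learner(data, layers=[1000, 500], ps=[0.001, 0.01],
#                         emb_drop=0.04, emb_szs={'Store': 50},
#                         y_range=y_range, metrics=exp_rmspe)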
learn.model
learn.model.n_emb + learn.model.n_cont
# training time shown here is for a 8 core cpu
learn.fit_one_cycle(6, 1e-3, wd=0.2)
test_preds = learn.get_preds(ds_type=DatasetType.Test)
test_preds[:5]
# we logged our label, remember to exponentiate it back to the original scale
df_test[dep_var] = np.exp(test_preds[0].numpy().ravel())
df_test[['Id', dep_var]] = df_test[['Id', dep_var]].astype('int')
submission_dir = 'submission'
if not os.path.isdir(submission_dir):
os.makedirs(submission_dir, exist_ok=True)
submission_path = os.path.join(submission_dir, 'rossmann_submission_fastai.csv')
df_test[['Id', dep_var]].to_csv(submission_path, index=False)
df_test[['Id', dep_var]].head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we can make use of
Step2: 2) Fixing assorted functions (the following functions must be corrected)
Step3: 2.2) Factorial
Step4: 3) Simplification exercises
Step5: The following cell must display 2 => True, 3 => False
Step6: 3.2) Exercise 2
Step7: The following cell must display 4 => True, 6 => False
Step8: 3.3 Exercise 3
Step9: The following cell must display [6,15,24] and [6,8,10]
Step10: 3.4) Exercise 4
Step11: The following cell must display Case 1 => True, Case 2 => False
Step12: 3.5) Exercise 5 - fizzbuzz
Step13: The following cell must display
Step14: 3.6) Exercise 7 - computing binomial coefficients
Step15: The cell below must display C(4,2) => 6, C(6,3) => 20
Step16: 3.7) Exercise 8 - the maximum_pair function
Step17: The following cell must display case 1 => 12, case 2 => 0
Step18: 3.8) Exercise 9 - Magic square function
Step19: The following cell must display True, False, False
|
<ASSISTANT_TASK:>
Python Code:
def cube_positif( x ):
    if x*x*x >= 0.0:
        return x*x*x
    print("Error")
    return
cube_positif(-4)
%matplotlib inline
import matplotlib.pyplot as plt
# We start without a function,
XX=[]
YY=[]
X=range(10,-10,-1)
for x in X:
XX.append(x)
YY.append(x*x*x)
plt.plot(XX,YY)
def cube_positif( x ):
if x*x*x >= 0.0:
return x*x*x
elif x*x*x <= 0.0:
return -1*x*x*x
# This function could be simplified
# We are going to use it in the following way:
XX=[]
YY=[]
X=range(10,-10,-1)
for x in X:
XX.append(x)
YY.append(cube_positif( x ))
plt.plot(XX,YY)
plt.show()
def est_entier(x):
return x == int(x)
def est_premier(n):
if n < 2:
return False
for it in range(2,int(n**0.5)+1):
if n % it == 0:
return False
return True
n=float(input())
print("est entier ? : {}".format(est_entier(n)))
print("est premier ? : {}".format(est_premier(n)))
#Que se passe t'il si l'on ne force pas n a être un nombre ?
#Que se passe t'il si on a un nombre a virgule (float) ?
#Est ce que ce nombre doit être nécessairement un int ou est ce que cela peut être autre chose ?
## Reference - please do not modify it
def factorielle( int( n ) ):
res = 1
for it in range(1,n+1):
res = res*it
return res
# Hint:
# This part of the code contains two errors
# A problem with how the variable is passed
# A problem related to block membership (indentation)
## DM - exercise n°1
## Modify the code below, and do not modify these comments
def factorielle( n ):
    res = 1
    for it in range(1, n + 1):
        res = res * it
    return res
print( factorielle( 10 ) )
# should give 3628800
## Reference - please do not modify it
def est_pair(n):
if n % 2 == 0:
return True
else:
return False
## DM - exercise n°2
## Modify the code below, and do not modify these comments
def est_pair(n):
    return n % 2 == 0
print( "2 => ", est_pair(2) )
print( "3 => ", est_pair(3) )
## Reference - please do not modify it
def possede_un_seul_diviseur(n):
nb_diviseur = 0
for it in range(2,n):
if n % it == 0:
nb_diviseur = nb_diviseur + 1
if nb_diviseur == 1:
return True
return False
## DM - exercise n°3
## Modify the code below, and do not modify these comments
def possede_un_seul_diviseur(n):
    diviseurs = [it for it in range(2, n) if n % it == 0]
    return len(diviseurs) == 1
print( "4 => ", possede_un_seul_diviseur(4) )
print( "6 => ", possede_un_seul_diviseur(6) )
## Reference - please do not modify it
def somme_partielle(liste):
if len(liste) != 9:
return None
a1 = 0
for it in range(0, len(liste[0:3])):
a1 = a1 + liste[it]
a2 = 0
for it in range(3, len(liste[3:6])):
a2 = a2 + liste[it]
a3 = 0
for it in range(6, len(liste[6:9])):
a3 = a3 + liste[it]
res = []
res.append(a1)
res.append(a2)
res.append(a3)
return res
## DM - exercise n°4
## Modify the code below, and do not modify these comments
def somme_partielle(liste):
    if len(liste) != 9:
        return None
    return [sum(liste[0:3]), sum(liste[3:6]), sum(liste[6:9])]
a = somme_partielle( [1,2,3,4,5,6,7,8,9] )
print( a ) ## Displays [6, 15, 24]
b = somme_partielle( [0,1,5,0,1,7,0,1,9] )
print( b ) ## Displays [6,8,10]
## Reference - please do not modify it
def verifier_si_tous_les_elements_sont_egaux(liste):
nb_egaux = 0
for it in liste:
for it2 in liste:
if it != it2:
return False
else:
nb_egaux += 1
if nb_egaux == len(liste)**2:
return True
else:
return False
## DM - exercise n°5
## Modify the code below, and do not modify these comments
def verifier_si_tous_les_elements_sont_egaux(liste):
    return len(set(liste)) <= 1
print("Cas 1 =>", verifier_si_tous_les_elements_sont_egaux([1,1,1,1,1,1,1,1]))
print("Cas 2 =>", verifier_si_tous_les_elements_sont_egaux([1,1,1,1,1,1,1,2]))
## Reference - please do not modify it
def fizzbuzz(n):
it = 1
while it <= n:
if (it % 3 == 0) and (it % 5 == 0):
print("fizzbuzz")
if (it % 3 == 0) and (it % 5 != 0):
print("fizz")
if (it % 3 != 0) and (it % 5 == 0):
print("buzz")
if (it % 3 != 0) and (it % 5 != 0):
print(it)
it = it + 1
## DM - exercise n°6
## Modify the code below, and do not modify these comments
def fizzbuzz(n):
    for it in range(1, n + 1):
        if it % 15 == 0:
            print("fizzbuzz")
        elif it % 3 == 0:
            print("fizz")
        elif it % 5 == 0:
            print("buzz")
        else:
            print(it)
fizzbuzz(20)
## Reference - please do not modify it
import math
def combinatoire(n,k):
a = math.factorial(n)
a = a // math.factorial(k)
a = a // math.factorial(n-k)
return a
## DM - exercise n°7
## Modify the code below, and do not modify these comments
import math
def combinatoire(n, k):
    return math.factorial(n) // (math.factorial(k) * math.factorial(n - k))
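# Quick sanity check (illustrative): Pascal's rule C(n,k) = C(n-1,k-1) + C(n-1,k)
assert combinatoire(6, 3) == combinatoire(5, 2) + combinatoire(5, 3)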
print( "C(4,2) =>", combinatoire(4,2) )
print( "C(6,3) =>", combinatoire(6,3) )
## Reference - please do not modify it
def maximum_pair(liste):
for it in liste:
if it % 2 == 0:
est_le_plus_grand = True
for it2 in liste:
if (it2 % 2 == 0) and it2 > it:
est_le_plus_grand = False
break
if est_le_plus_grand == True:
return it
return 0
## DM - exercise n°8
## Modify the code below, and do not modify these comments
def maximum_pair(liste):
    pairs = [x for x in liste if x % 2 == 0]
    if pairs:
        return max(pairs)
    return 0
print( "cas 1 =>", maximum_pair([1,2,4,13,12,10,8]))
print( "cas 2 =>", maximum_pair([1,21,41,13,125,109,87]))
## Reference - please do not modify it
def carre_magique(carre_magique):
if len(carre_magique) != 3:
return False
for it_ligne in carre_magique:
if len(it_ligne) != 3:
return False
for it_ligne_1 in carre_magique:
for it_ligne_2 in carre_magique:
if id(it_ligne_1) != id(it_ligne_2):
for it_elt1 in it_ligne_1:
for it_elt2 in it_ligne_2:
if it_elt1 == it_elt2:
return False
somme_ligne_1 = 0
somme_ligne_2 = 0
somme_ligne_3 = 0
ligne_1 = carre_magique[0]
ligne_2 = carre_magique[1]
ligne_3 = carre_magique[2]
for it in ligne_1:
somme_ligne_1 = somme_ligne_1 + it
for it in ligne_2:
somme_ligne_2 = somme_ligne_2 + it
for it in ligne_3:
somme_ligne_3 = somme_ligne_3 + it
if (somme_ligne_1 != somme_ligne_2) or (somme_ligne_1 != somme_ligne_3) or (somme_ligne_2 != somme_ligne_3):
return False
somme_colonne_1 = 0
somme_colonne_2 = 0
somme_colonne_3 = 0
for it in range(3):
somme_colonne_1 = somme_colonne_1 + carre_magique[it][0]
for it in range(3):
somme_colonne_2 = somme_colonne_2 + carre_magique[it][1]
for it in range(3):
somme_colonne_3 = somme_colonne_3 + carre_magique[it][2]
if (somme_colonne_1 != somme_colonne_2) or (somme_colonne_1 != somme_colonne_3) or (somme_colonne_2 != somme_colonne_3):
return False
return True
## DM - exercise n°9
## Modify the code below, and do not modify these comments
def carre_magique(carre):
    # must be a 3x3 grid
    if len(carre) != 3:
        return False
    for ligne in carre:
        if len(ligne) != 3:
            return False
    # no value may appear in two different rows
    for i in range(3):
        for j in range(3):
            if i != j and set(carre[i]) & set(carre[j]):
                return False
    # every row sum and every column sum must be identical
    sommes = [sum(ligne) for ligne in carre]
    sommes += [sum(carre[i][j] for i in range(3)) for j in range(3)]
    return len(set(sommes)) == 1
print( carre_magique([[2,9,4],
[7,5,3],
[6,1,8]]))
print( carre_magique([[2,9,4],
[7,5,10],
[6,1,8]]))
print( carre_magique([[1,1,1],
[1,1,1],
[1,1,1]]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare the Fashion-MNIST data
Step2: Create the discriminator (the critic in the original WGAN)
Step3: Create the generator
Step5: Create the WGAN-GP model
Step6: Create a Keras callback that periodically saves generated images
Step7: Train the end-to-end model
Step8: Display the last generated images
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
IMG_SHAPE = (28, 28, 1)
BATCH_SIZE = 512
# Size of the noise vector
noise_dim = 128
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
print(f"Number of examples: {len(train_images)}")
print(f"Shape of the images in the dataset: {train_images.shape[1:]}")
# Reshape each sample to (28, 28, 1) and normalize the pixel values in the [-1, 1] range
train_images = train_images.reshape(train_images.shape[0], *IMG_SHAPE).astype("float32")
train_images = (train_images - 127.5) / 127.5
def conv_block(
x,
filters,
activation,
kernel_size=(3, 3),
strides=(1, 1),
padding="same",
use_bias=True,
use_bn=False,
use_dropout=False,
drop_value=0.5,
):
x = layers.Conv2D(
filters, kernel_size, strides=strides, padding=padding, use_bias=use_bias
)(x)
if use_bn:
x = layers.BatchNormalization()(x)
x = activation(x)
if use_dropout:
x = layers.Dropout(drop_value)(x)
return x
def get_discriminator_model():
img_input = layers.Input(shape=IMG_SHAPE)
# Zero pad the input to make the input images size to (32, 32, 1).
x = layers.ZeroPadding2D((2, 2))(img_input)
x = conv_block(
x,
64,
kernel_size=(5, 5),
strides=(2, 2),
use_bn=False,
use_bias=True,
activation=layers.LeakyReLU(0.2),
use_dropout=False,
drop_value=0.3,
)
x = conv_block(
x,
128,
kernel_size=(5, 5),
strides=(2, 2),
use_bn=False,
activation=layers.LeakyReLU(0.2),
use_bias=True,
use_dropout=True,
drop_value=0.3,
)
x = conv_block(
x,
256,
kernel_size=(5, 5),
strides=(2, 2),
use_bn=False,
activation=layers.LeakyReLU(0.2),
use_bias=True,
use_dropout=True,
drop_value=0.3,
)
x = conv_block(
x,
512,
kernel_size=(5, 5),
strides=(2, 2),
use_bn=False,
activation=layers.LeakyReLU(0.2),
use_bias=True,
use_dropout=False,
drop_value=0.3,
)
x = layers.Flatten()(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(1)(x)
d_model = keras.models.Model(img_input, x, name="discriminator")
return d_model
d_model = get_discriminator_model()
d_model.summary()
def upsample_block(
x,
filters,
activation,
kernel_size=(3, 3),
strides=(1, 1),
up_size=(2, 2),
padding="same",
use_bn=False,
use_bias=True,
use_dropout=False,
drop_value=0.3,
):
x = layers.UpSampling2D(up_size)(x)
x = layers.Conv2D(
filters, kernel_size, strides=strides, padding=padding, use_bias=use_bias
)(x)
if use_bn:
x = layers.BatchNormalization()(x)
if activation:
x = activation(x)
if use_dropout:
x = layers.Dropout(drop_value)(x)
return x
def get_generator_model():
noise = layers.Input(shape=(noise_dim,))
x = layers.Dense(4 * 4 * 256, use_bias=False)(noise)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.2)(x)
x = layers.Reshape((4, 4, 256))(x)
x = upsample_block(
x,
128,
layers.LeakyReLU(0.2),
strides=(1, 1),
use_bias=False,
use_bn=True,
padding="same",
use_dropout=False,
)
x = upsample_block(
x,
64,
layers.LeakyReLU(0.2),
strides=(1, 1),
use_bias=False,
use_bn=True,
padding="same",
use_dropout=False,
)
x = upsample_block(
x, 1, layers.Activation("tanh"), strides=(1, 1), use_bias=False, use_bn=True
)
# At this point, we have an output which has the same shape as the input, (32, 32, 1).
# We will use a Cropping2D layer to make it (28, 28, 1).
x = layers.Cropping2D((2, 2))(x)
g_model = keras.models.Model(noise, x, name="generator")
return g_model
g_model = get_generator_model()
g_model.summary()
class WGAN(keras.Model):
def __init__(
self,
discriminator,
generator,
latent_dim,
discriminator_extra_steps=3,
gp_weight=10.0,
):
super(WGAN, self).__init__()
self.discriminator = discriminator
self.generator = generator
self.latent_dim = latent_dim
self.d_steps = discriminator_extra_steps
self.gp_weight = gp_weight
def compile(self, d_optimizer, g_optimizer, d_loss_fn, g_loss_fn):
super(WGAN, self).compile()
self.d_optimizer = d_optimizer
self.g_optimizer = g_optimizer
self.d_loss_fn = d_loss_fn
self.g_loss_fn = g_loss_fn
def gradient_penalty(self, batch_size, real_images, fake_images):
        """Calculates the gradient penalty.

        This loss is calculated on an interpolated image
        and added to the discriminator loss.
        """
# Get the interpolated image
alpha = tf.random.normal([batch_size, 1, 1, 1], 0.0, 1.0)
diff = fake_images - real_images
interpolated = real_images + alpha * diff
with tf.GradientTape() as gp_tape:
gp_tape.watch(interpolated)
# 1. Get the discriminator output for this interpolated image.
pred = self.discriminator(interpolated, training=True)
# 2. Calculate the gradients w.r.t to this interpolated image.
grads = gp_tape.gradient(pred, [interpolated])[0]
# 3. Calculate the norm of the gradients.
norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
gp = tf.reduce_mean((norm - 1.0) ** 2)
return gp
def train_step(self, real_images):
if isinstance(real_images, tuple):
real_images = real_images[0]
# Get the batch size
batch_size = tf.shape(real_images)[0]
# For each batch, we are going to perform the
# following steps as laid out in the original paper:
# 1. Train the generator and get the generator loss
# 2. Train the discriminator and get the discriminator loss
# 3. Calculate the gradient penalty
# 4. Multiply this gradient penalty with a constant weight factor
# 5. Add the gradient penalty to the discriminator loss
# 6. Return the generator and discriminator losses as a loss dictionary
# Train the discriminator first. The original paper recommends training
# the discriminator for `x` more steps (typically 5) as compared to
# one step of the generator. Here we will train it for 3 extra steps
# as compared to 5 to reduce the training time.
for i in range(self.d_steps):
# Get the latent vector
random_latent_vectors = tf.random.normal(
shape=(batch_size, self.latent_dim)
)
with tf.GradientTape() as tape:
# Generate fake images from the latent vector
fake_images = self.generator(random_latent_vectors, training=True)
# Get the logits for the fake images
fake_logits = self.discriminator(fake_images, training=True)
# Get the logits for the real images
real_logits = self.discriminator(real_images, training=True)
# Calculate the discriminator loss using the fake and real image logits
d_cost = self.d_loss_fn(real_img=real_logits, fake_img=fake_logits)
# Calculate the gradient penalty
gp = self.gradient_penalty(batch_size, real_images, fake_images)
# Add the gradient penalty to the original discriminator loss
d_loss = d_cost + gp * self.gp_weight
# Get the gradients w.r.t the discriminator loss
d_gradient = tape.gradient(d_loss, self.discriminator.trainable_variables)
# Update the weights of the discriminator using the discriminator optimizer
self.d_optimizer.apply_gradients(
zip(d_gradient, self.discriminator.trainable_variables)
)
# Train the generator
# Get the latent vector
random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
with tf.GradientTape() as tape:
# Generate fake images using the generator
generated_images = self.generator(random_latent_vectors, training=True)
# Get the discriminator logits for fake images
gen_img_logits = self.discriminator(generated_images, training=True)
# Calculate the generator loss
g_loss = self.g_loss_fn(gen_img_logits)
# Get the gradients w.r.t the generator loss
gen_gradient = tape.gradient(g_loss, self.generator.trainable_variables)
# Update the weights of the generator using the generator optimizer
self.g_optimizer.apply_gradients(
zip(gen_gradient, self.generator.trainable_variables)
)
return {"d_loss": d_loss, "g_loss": g_loss}
class GANMonitor(keras.callbacks.Callback):
def __init__(self, num_img=6, latent_dim=128):
self.num_img = num_img
self.latent_dim = latent_dim
def on_epoch_end(self, epoch, logs=None):
random_latent_vectors = tf.random.normal(shape=(self.num_img, self.latent_dim))
generated_images = self.model.generator(random_latent_vectors)
generated_images = (generated_images * 127.5) + 127.5
for i in range(self.num_img):
img = generated_images[i].numpy()
img = keras.preprocessing.image.array_to_img(img)
img.save("generated_img_{i}_{epoch}.png".format(i=i, epoch=epoch))
# Instantiate the optimizer for both networks
# (learning_rate=0.0002, beta_1=0.5 are recommended)
generator_optimizer = keras.optimizers.Adam(
learning_rate=0.0002, beta_1=0.5, beta_2=0.9
)
discriminator_optimizer = keras.optimizers.Adam(
learning_rate=0.0002, beta_1=0.5, beta_2=0.9
)
# Define the loss functions for the discriminator,
# which should be (fake_loss - real_loss).
# We will add the gradient penalty later to this loss function.
def discriminator_loss(real_img, fake_img):
real_loss = tf.reduce_mean(real_img)
fake_loss = tf.reduce_mean(fake_img)
return fake_loss - real_loss
# Define the loss functions for the generator.
def generator_loss(fake_img):
return -tf.reduce_mean(fake_img)
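# Intuition (illustrative numbers only): if the critic scores real images
# around +1 and fakes around -1, then d_loss = (-1) - (+1) = -2 (critic doing
# well) while g_loss = -(-1) = +1 (generator doing poorly); generator updates
# push the critic's score on fakes upwards.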
# Set the number of epochs for training.
epochs = 20
# Instantiate the custom `GANMonitor` Keras callback.
cbk = GANMonitor(num_img=3, latent_dim=noise_dim)
# Instantiate the WGAN model.
wgan = WGAN(
discriminator=d_model,
generator=g_model,
latent_dim=noise_dim,
discriminator_extra_steps=3,
)
# Compile the WGAN model.
wgan.compile(
d_optimizer=discriminator_optimizer,
g_optimizer=generator_optimizer,
g_loss_fn=generator_loss,
d_loss_fn=discriminator_loss,
)
# Start training the model.
wgan.fit(train_images, batch_size=BATCH_SIZE, epochs=epochs, callbacks=[cbk])
from IPython.display import Image, display
display(Image("generated_img_0_19.png"))
display(Image("generated_img_1_19.png"))
display(Image("generated_img_2_19.png"))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'test-institute-1', 'sandbox-3', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: INTRO
Step2: architecture.json
Step3: RELEVANT HDF5 FILES
Step4: validation = h5py.File('../data/hdf5datasets/validation.h5', 'r')
Step5: test = h5py.File('../data/hdf5datasets/test.h5', 'r')
Step6: Examining the 'info' track
Step7: Jupyter Notebook as a Documentation Resource
Step8: Note
|
<ASSISTANT_TASK:>
Python Code:
from scipy import stats
import h5py
! cat configurations.json
! cat architecture.json
train = h5py.File('../data/hdf5datasets/NSMSDSRSCSTSRI_500bp/train.h5', 'r')
train.items()
validation = h5py.File('../data/hdf5datasets/NSMSDSRSCSTSRI_500bp/validation.h5', 'r')
validation.items()
test = h5py.File('../data/hdf5datasets/NSMSDSRSCSTSRI_500bp/test.h5', 'r')
test.items()
infoRef_test = test.get('info')[:]
stats.describe(infoRef_test[:, 0])
stats.describe(infoRef_test[:, 1])
stats.describe(infoRef_test[:, 2])
stats.describe(infoRef_test[:, 3])
print()
infoRef_validation = validation.get('info')[:]
stats.describe(infoRef_validation[:, 0])
stats.describe(infoRef_validation[:, 1])
stats.describe(infoRef_validation[:, 2])
stats.describe(infoRef_validation[:, 3])
print()
infoRef_train = train.get('info')[:]
stats.describe(infoRef_train[:, 0])
stats.describe(infoRef_train[:, 1])
stats.describe(infoRef_train[:, 2])
stats.describe(infoRef_train[:, 3])
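# h5py datasets support lazy slicing, so we could also peek at a few rows
# without loading the whole 'info' array into memory (illustrative):
# train.get('info')[:5, :]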
import main, models, visualization
?main
??models
?visualization
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Section 2
Step2: Section 3
Step3: Display variable power
Step4: Use parser function from dateutil module to convert the timestamps in power from a string to a datetime - Timestamp object
Step5: Select 15 minute time interval data and re-store in power
Step6: Use group by function of Pandas dataframe to group the power data based on
Step7: Use .sum() attribute of group object to sum up the Average power based on the group variables defined above
Step8: Plot the graph of Total power over time
Step9: Section 4
Step10: Find the Design matrix for the Total lighting consumption
Step11: Define a function 'Beta_hat' which will take a design matrix and a power vector as arguments and outputs the Beta hat values as defined by the function inverse(Xt * X) * Xt * Y where Xt is the transpose of the design matrix and Y is the power vector
Step12: Finding Beta hat for Total lighting consumption and calculating predicted power. Here the data set used for training and testing the regression model is the same.
Step13: Defining function Cal_Rsqr which takes arguments Actual power and Predicted power and then calculates & returns the R squared value
Step14: Call function Cal_Rsqr for the total lighting consumption
Step15: Section 5
Step16: Removing the first 4 days of data to allow the train and test datasets to start at the same 15 minute time interval of the week
Step17: Generating design matrices for train and test dataset by calling fucntion DesignMatrix
Step18: Calculating Beta hat for train data set
Step19: Estimate predicted power using beta hat and test matrix. Calculate R square value
Step20: Plot graph of Actual power versus Predicted power with a common time axis
Step21: Section 6
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
import scipy.spatial.distance as dist
%matplotlib inline
import glob
allFiles = glob.glob('4months' + "/*.csv")
frame = pd.DataFrame()
list_ = []
for file_ in allFiles:
df = pd.read_csv(file_,index_col=None, header=0)
list_.append(df)
frame = pd.concat(list_)
frame = frame.reset_index(drop=True)
frame = frame.iloc[:,(5,4,2,11)]
scheck = lambda d: 'Lights' in d or 'Light' in d or 'Lts' in d
S_ind = list(map(scheck,frame.BranchName))
power = frame[S_ind].reset_index(drop=True)
power
from dateutil import parser
power.DateStamp = power.DateStamp.apply(parser.parse)
intervals = lambda d: (d.time().minute)%15 == 0
indexes_15 = list(map(intervals,power.DateStamp))
power = power[indexes_15].reset_index(drop=True)
P_Total = power.groupby(['DateStamp'], as_index=False)
P_Panel = power.groupby(['DateStamp','PanelName'],as_index=False)
Lighting_Panel = P_Panel.sum()
Lighting_Panel.columns = ['Timestamp','PanelName','AvgPower']
Lighting_Panel
Lighting_Total = P_Total.sum()
Lighting_Total.columns = ['Timestamp','AvgPower']
Lighting_Total
plt.figure(figsize=(10,10))
plt.plot(Lighting_Total.Timestamp,Lighting_Total.AvgPower)
plt.xlabel('Time stamp (days)')
plt.ylabel('Power [ in Watts]')
import math
def DesignMatrix(timestamps):
tslen = len(timestamps)
ind = 672
num = math.ceil(tslen/ind)
sing = np.identity(ind)
Dmat = np.tile(sing,(num,1))[0:tslen,:]
return Dmat
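# Sanity check (illustrative): one week of 15 minute intervals has 7*24*4 = 672
# slots, so e.g. 700 timestamps should yield a (700, 672) indicator matrix with
# a single 1 per row:
# DesignMatrix(range(700)).shape == (700, 672)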
DMX = DesignMatrix(Lighting_Total.Timestamp)
def beta_hat(X,Y):
B = np.dot(np.dot(np.linalg.inv(np.dot(X.T,X)),X.T),Y)
return B
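# Note: an equivalent and numerically stabler alternative (for reference only,
# not used below) is to let numpy solve the least-squares problem directly,
# avoiding the explicit inverse of X.T.dot(X):
# B, residuals, rank, sv = np.linalg.lstsq(X, Y, rcond=None)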
Act_power = Lighting_Total.AvgPower
B_Lighting = beta_hat(DMX, Act_power)
Pred_power = np.dot(DMX,B_Lighting)
def Cal_Rsqr(Actual_power,Predict_power):
Power_mean = np.mean(Actual_power)
Numer = Actual_power - Predict_power
Denom = Actual_power - Power_mean
R_sqr = 1- (np.dot(Numer.T,Numer)/np.dot(Denom.T,Denom))
return R_sqr
Cal_Rsqr(Act_power,Pred_power)
W_check = lambda d : d.isocalendar()[1]%2 == 1
W_indices = list(map(W_check, Lighting_Total.Timestamp))
Train_Lighting = Lighting_Total[W_indices]
Test_Lighting = Lighting_Total[np.invert(W_indices)]
Train_Lighting = Train_Lighting.iloc[384:,:]
TrainDMX = DesignMatrix(Train_Lighting.Timestamp)
TestDMX = DesignMatrix(Test_Lighting.Timestamp)
LBs = beta_hat(TrainDMX, Train_Lighting.AvgPower)
Lighting_predpower = np.dot(TestDMX,LBs)
Lighting_actpower = Test_Lighting.AvgPower
Cal_Rsqr(Lighting_actpower,Lighting_predpower)
plt.figure(figsize=(15,15))
plt.plot(Test_Lighting.Timestamp,Lighting_actpower,Test_Lighting.Timestamp,Lighting_predpower)
plt.xlabel('Time stamp (days)')
plt.ylabel('Power [ in Watts]')
count = 0
for name in Lighting_Panel.PanelName.unique():
Data = Lighting_Panel[Lighting_Panel.PanelName == name]
count = count + 1
W_check = lambda d : d.isocalendar()[1]%2 == 1
W_indices = list(map(W_check, Data.Timestamp))
Train_dat = Data[W_indices].iloc[384:,:]
Test_dat = Data[np.invert(W_indices)]
TrainDMX = DesignMatrix(Train_dat.Timestamp)
TestDMX = DesignMatrix(Test_dat.Timestamp)
LB = beta_hat(TrainDMX,Train_dat.AvgPower)
Lighting_actpower = Train_dat.AvgPower
Lighting_predpower = np.dot(TrainDMX,LB)
R_train_panel = Cal_Rsqr(Lighting_actpower,Lighting_predpower)
print ('R square value for prediction on train data for panel ' + name + ' is ' + str(R_train_panel))
Lighting_actpower = Test_dat.AvgPower
Lighting_predpower = np.dot(TestDMX,LB)
R_test_panel = Cal_Rsqr(Lighting_actpower,Lighting_predpower)
print ('R square value for prediction on test data for panel ' + name + ' is ' + str(R_test_panel))
if (count == 7):
plt.plot(Test_dat.Timestamp,Lighting_actpower,Test_dat.Timestamp,Lighting_predpower)
plt.xlabel('Time stamp (days)')
plt.ylabel('Power [ in Watts]')
break;
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There is something I should mention here. python 2 can trip people up when trying to do something like the following
Step2: This is because python defaults to returning an integer (rounded down) when you ask to divide two integers. Read more about this here
Step3: Floating point numbers
Step4: So you might think that floats are simply numbers that have decimal parts but...
Step5: The python docs discuss this behavior
Step6: Strings
Step8: Strings can use single, double, and triple quotes
Step9: It's useful that we can use all types of quotes, as it allows us to have strings with quotes inside them.
Step10: One of the most common built-in functions you'll use is len(), as you might imagine it returns the length of the argument you pass it
Step11: These methods on strings allow us to modify and ask questions about a string.
Step12: Here are some examples of what we can do with these methods
Step13: Lists
Step14: Just like strings there are methods available to us to work with lists
Step15: Let's take a look at how these work. We'll start off with a list of two names, alice and bob. From there we'll use each of the methods to modify the 'names' list.
Step16: append() will add the argument to the end of the list
Step17: We'll append again to show off the next method
Step18: count() tells us how many times the argument occurs in the list
Step19: append() only adds a single item at a time; if we want to extend our original list by several items, we can use the extend() method and pass in a list of things to add to the end.
Step20: We can find the position of an item using index(), remember lists start counting at 0
Step21: We can use insert() to put an item at a specific position in the list
Step22: pop() can be used for a couple of things; if we simply need to remove the last item from the list, we can call it by itself
Step23: But, we can also keep that last item in another variable
Step24: remove() will remove the 1st occurrence of the argument we give it. Notice that alice and mike are now next to each other and the last bob is still in the list
Step25: reverse() does pretty much what you'd expect it to
Step26: As does sort()
Step27: Interlude
Step28: But this will work for other types as well
Step29: Tuples
Step30: We normally use parentheses to define a tuple but really any object followed by a comma becomes a tuple. Either way, python will add the parentheses for us anyway
Step31: Probably the most important difference between a list and a tuple has to do with 'immutability.' Let's take a look at an example
Step32: So far, not much difference. But let's say that we wanted to get rid of bob and replace him with eve.
Step33: Uh-oh python has told us that the tuple does not allow us to 'mute' an item in the tuple the way we can with a list. In other words lists are mutable, tuples are immutable.
Step34: As a result of the immutability of tuples we don't have many built in methods.
Step35: Here we have a relationship between pairs of strings; each pair is separated by a ':'
Step36: Good, when I use a key to select from the dictionary I get the value associated with that key as a response. Let's try another way
Step37: An important thing to note about dictionaries is that while they are similar to lists and tuples they are unordered.
Step38: Dictionaries are mutable like a list so we can change the relationship of a pair like this
Step39: We can also add pairs
Step40: But for now let's keep the dictionary as a set of English words mapped to their Spanish translations
Step41: Let's take a look at the methods available to us for dictionaries
Step42: copy() will return a "shallow copy" of the dictionary. I won't get into detail here but if you'd like more information see
Step43: fromkeys() will take the keys from one dict and make a new dict with the same keys but with the values that we specify
Step44: get() will pull the value from a dictionary
Step45: What's useful about the get() method is that we can specify a default value in the case that what we are asking for doesn't exist yet in the dictionary. This can avoid errors
Step46: We can also ask if a key exists using has_key()
Step47: Although in this example we could have gotten the same result by doing the following
Step48: Even though there are methods available to us (the ones we can see with dir()) there may be built-in tools of the language that may be a better choice.
Step49: iteritems() gives an iterator that we can call .next() on. This is valuable in the case that we don't want to load the entire dictionary into memory but still want to iterate through the items.
Step50: We can do the same with the keys using iterkeys()
Step51: We can remove a key and return the value using pop()
Step52: popitem() will remove an association and return it as a tuple, but you don't get to pick which item you'd like to pop out!
Step53: setdefault() works a bit like get() but will set the value for us if it doesn't exist in the dictionary
Step54: update() allows us to add values from another dictionary
Step55: We can see all the values from a dictionary using values()
Step56: These next methods, viewitems(), viewkeys() and viewvalues(), each return a dictionary view object. The python docs discuss their purpose
Step57: We skipped over clear() but here's a good time to see what it does, clear the dictionary out!
|
<ASSISTANT_TASK:>
Python Code:
1
-5
print 2 + 10 # addition
print 5 - 3 # subtraction
print 6 * 4 # multiplication
print 10 / 5 # division
print 2**4 # exponents
2 / 3
2 / 3.0
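# in python 2 we can opt into python 3 style "true division" for a whole file
# with a __future__ import (it must appear at the top of a module):
# from __future__ import division
# after which 2 / 3 gives 0.666... and floor division is written 2 // 3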
1.5
type(1.5)
0.1 + 0.2
from decimal import Decimal
Decimal('0.1') + Decimal('0.2')
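# a common workaround when comparing floats: test with a small tolerance
# instead of exact equality
abs((0.1 + 0.2) - 0.3) < 1e-9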
'Hello python learners'
print 'Hello'
print "there"
print '''python
learners!'''
print "This string contains single quotes but that's ok since it's surrounded by double quotes"
print 'This string is surrounded by single quotes. The cow says: "mooo"'
print '''This string want's to mix both "types" of quotes and that's ok since we surrounded it with triple quotes! '''
"We can " + "concatenate strings " + "together using the + operator"
first = "Sometimes it's better "
middle = "to assign parts of a long string "
last = "to variables then concatenate the strings by the variable names"
sentence = first + middle + last
print sentence
("And sometimes "
"we can split a string "
"on seperate lines and they will be "
"put together since they are surrounded by parentheses!")
name = "Eve"
len(name)
for i in dir('Hello'):
if not i.startswith('_'):
print i
word = "hello"
print "capitalize:", word.capitalize() # capitalize the first letter of the string
print "count:", word.count('l') # count how many times the string we pass as an argument appear in 'word'
print "endswith:", word.endswith('o') # T/F if it ends with the string we pass as an argument
print "index:", word.index('o') # Returns index of the string we pass as an argument (remember indexes start at 0)
print "isalpha:", word.isalpha() # methods that start with 'is' give us a clue that the method returns True or False
print "upper:", word.upper() # changes all letters of the string to uppercase
word_two = "HeLlO"
print "swapcase:", word_two.swapcase() # for every letter in the string, swap between upper and lower case
name = "guido van rossum"
print "title:", name.title() # Assumes the string is a name and will change the first letter of each word to uppercase
sentence = "The quick brown fox"
print "split:", sentence.split() # Splits the string into individual words grouped into a list.
a = [1, 2, 3]
print a
print len(a)
b = [1, 'one', 1.0]
print b
print len(b)
c = [[1, 2, 3], ['one', 'two', 'three'], [1.0, 2.0, 3.0]]
print c
print len(c)
for i in dir([]):
if not i.startswith('_'):
print i
names = ['alice', 'bob']
names
names.append('eve')
names
names.append('bob')
names
print "The word 'bob' is seen:", names.count('bob')
names.extend(['bill', 'sally'])
names
print "'sally' is at index:", names.index('sally')
names.insert(2, 'mike')
names
names.pop()
names
last_person = names.pop()
print names
print last_person
names.remove('bob')
names
names.reverse()
names
names.sort()
names
print names
print names[0]
'alice'[3]
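# slicing uses the same square bracket syntax and works on lists and strings alike
print names[0:2]
print 'alice'[1:4]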
a = (1, 2, 3)
print a
print type(a)
a = "example",
print type(a)
print a
names = ['alice', 'bob']
people = ('alice', 'bob')
print names
print people
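# tuples support unpacking, which makes multiple assignment easy
first_person, second_person = people
print first_person
print second_person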
names[1] = 'eve'
names
people[1] = 'eve'
for i in dir(()):
if not i.startswith('_'):
print i
eng_to_spn = {'one': 'uno',
'two': 'dos',
'three': 'tres'}
eng_to_spn
eng_to_spn['one']
eng_to_spn[0]
for key in eng_to_spn:
print key
eng_to_spn['one'] = 1
eng_to_spn['two'] = 2
eng_to_spn['three'] = 3
eng_to_spn
eng_to_spn['four'] = 4
eng_to_spn
eng_to_spn = {'one': 'uno',
'two': 'dos',
'three': 'tres'}
eng_to_spn
for i in dir({}):
if not i.startswith('_'):
print i
eng_to_spn2 = eng_to_spn.copy()
eng_to_spn2
eng_to_spn3 = eng_to_spn.fromkeys(eng_to_spn, 'english')
eng_to_spn3
eng_to_spn.get('one')
eng_to_spn['four']
print eng_to_spn.get('four', None)
eng_to_spn.has_key('four')
'four' in eng_to_spn
eng_to_spn.items()
items = eng_to_spn.iteritems()
print items.next()
print items.next()
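# more commonly we simply loop over the iterator and let the for statement
# call .next() for us
for key, value in eng_to_spn.iteritems():
    print key, '->', value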
keys = eng_to_spn.iterkeys()
print keys.next()
print keys.next()
three = eng_to_spn.pop('three')
print three
eng_to_spn
anything = eng_to_spn.popitem()
print anything
eng_to_spn
eng_to_spn.setdefault('four', 'cuatro')
eng_to_spn
new_numbers = {'five': 'cinco', 'six': 'seis'}
eng_to_spn.update(new_numbers)
eng_to_spn
eng_to_spn.values()
eng_to_spn.viewitems()
eng_to_spn.viewkeys()
eng_to_spn.viewvalues()
eng_to_spn.clear()
eng_to_spn
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Placement of ticks and custom tick labels
Step2: There are a number of more advanced methods for controlling major and minor tick placement in matplotlib figures, such as automatic placement according to different policies. See http
Step3: Axis number and axis label spacing
Step4: Axis position adjustments
Step5: Axis grid
Step6: Axis spines
Step7: Twin axes
Step8: Axes where x and y are zero
Step9: Other 2D plot styles
Step10: Text annotation
Step11: Figures with multiple subplots and insets
Step12: subplot2grid
Step13: gridspec
Step14: add_axes
Step15: Colormap and contour figures
Step16: pcolor
Step17: imshow
Step18: contour
Step19: 3D figures
Step20: Surface plots
Step21: Wire-frame plot
Step22: Contour plots with projections
|
<ASSISTANT_TASK:>
Python Code:
# setup: imports and sample data for the examples below (the exact x array is
# an assumption carried over from earlier in the lecture)
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 5, 100)
fig, axes = plt.subplots(1, 2, figsize=(10,4))
axes[0].plot(x, x**2, x, np.exp(x))
axes[0].set_title("Normal scale")
axes[1].plot(x, x**2, x, np.exp(x))
axes[1].set_yscale("log")
axes[1].set_title("Logarithmic scale (y)");
fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(x, x**2, x, x**3, lw=2)
ax.set_xticks([1, 2, 3, 4, 5])
ax.set_xticklabels([r'$\alpha$', r'$\beta$', r'$\gamma$', r'$\delta$', r'$\epsilon$'], fontsize=18)
yticks = [0, 50, 100, 150]
ax.set_yticks(yticks)
ax.set_yticklabels(["$%.1f$" % y for y in yticks], fontsize=18); # use LaTeX formatted labels
fig, ax = plt.subplots(1, 1)
ax.plot(x, x**2, x, np.exp(x))
ax.set_title("scientific notation")
ax.set_yticks([0, 50, 100, 150])
from matplotlib import ticker
formatter = ticker.ScalarFormatter(useMathText=True)
formatter.set_scientific(True)
formatter.set_powerlimits((-1,1))
ax.yaxis.set_major_formatter(formatter)
# distance between x and y axis and the numbers on the axes
matplotlib.rcParams['xtick.major.pad'] = 5
matplotlib.rcParams['ytick.major.pad'] = 5
fig, ax = plt.subplots(1, 1)
ax.plot(x, x**2, x, np.exp(x))
ax.set_yticks([0, 50, 100, 150])
ax.set_title("label and axis spacing")
# padding between axis label and axis numbers
ax.xaxis.labelpad = 5
ax.yaxis.labelpad = 5
ax.set_xlabel("x")
ax.set_ylabel("y");
# restore defaults
matplotlib.rcParams['xtick.major.pad'] = 3
matplotlib.rcParams['ytick.major.pad'] = 3
fig, ax = plt.subplots(1, 1)
ax.plot(x, x**2, x, np.exp(x))
ax.set_yticks([0, 50, 100, 150])
ax.set_title("title")
ax.set_xlabel("x")
ax.set_ylabel("y")
fig.subplots_adjust(left=0.15, right=.9, bottom=0.1, top=0.9);
fig, axes = plt.subplots(1, 2, figsize=(10,3))
# default grid appearance
axes[0].plot(x, x**2, x, x**3, lw=2)
axes[0].grid(True)
# custom grid appearance
axes[1].plot(x, x**2, x, x**3, lw=2)
axes[1].grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
fig, ax = plt.subplots(figsize=(6,2))
ax.spines['bottom'].set_color('blue')
ax.spines['top'].set_color('blue')
ax.spines['left'].set_color('red')
ax.spines['left'].set_linewidth(2)
# turn off axis spine to the right
ax.spines['right'].set_color("none")
ax.yaxis.tick_left() # only ticks on the left side
fig, ax1 = plt.subplots()
ax1.plot(x, x**2, lw=2, color="blue")
ax1.set_ylabel(r"area $(m^2)$", fontsize=18, color="blue")
for label in ax1.get_yticklabels():
label.set_color("blue")
ax2 = ax1.twinx()
ax2.plot(x, x**3, lw=2, color="red")
ax2.set_ylabel(r"volume $(m^3)$", fontsize=18, color="red")
for label in ax2.get_yticklabels():
label.set_color("red")
fig, ax = plt.subplots()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0)) # set position of x spine to x=0
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data',0)) # set position of y spine to y=0
xx = np.linspace(-0.75, 1., 100)
ax.plot(xx, xx**3);
n = np.array([0,1,2,3,4,5])
fig, axes = plt.subplots(1, 4, figsize=(12,3))
axes[0].scatter(xx, xx + 0.25*np.random.randn(len(xx)))
axes[0].set_title("scatter")
axes[1].step(n, n**2, lw=2)
axes[1].set_title("step")
axes[2].bar(n, n**2, align="center", width=0.5, alpha=0.5)
axes[2].set_title("bar")
axes[3].fill_between(x, x**2, x**3, color="green", alpha=0.5);
axes[3].set_title("fill_between");
fig, ax = plt.subplots()
ax.plot(xx, xx**2, xx, xx**3)
ax.text(0.15, 0.2, r"$y=x^2$", fontsize=20, color="blue")
ax.text(0.65, 0.1, r"$y=x^3$", fontsize=20, color="green");
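# annotate() is similar to text() but can draw an arrow pointing at a data
# coordinate (a small illustrative addition to the example above):
ax.annotate("origin", xy=(0, 0), xytext=(-0.6, 0.25),
            arrowprops=dict(arrowstyle="->"))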
fig, ax = plt.subplots(2, 3)
fig.tight_layout()
fig = plt.figure()
ax1 = plt.subplot2grid((3,3), (0,0), colspan=3)
ax2 = plt.subplot2grid((3,3), (1,0), colspan=2)
ax3 = plt.subplot2grid((3,3), (1,2), rowspan=2)
ax4 = plt.subplot2grid((3,3), (2,0))
ax5 = plt.subplot2grid((3,3), (2,1))
fig.tight_layout()
import matplotlib.gridspec as gridspec
fig = plt.figure()
gs = gridspec.GridSpec(2, 3, height_ratios=[2,1], width_ratios=[1,2,1])
for g in gs:
ax = fig.add_subplot(g)
fig.tight_layout()
fig, ax = plt.subplots()
ax.plot(xx, xx**2, xx, xx**3)
fig.tight_layout()
# inset
inset_ax = fig.add_axes([0.2, 0.55, 0.35, 0.35]) # X, Y, width, height
inset_ax.plot(xx, xx**2, xx, xx**3)
inset_ax.set_title('zoom near origin')
# set axis range
inset_ax.set_xlim(-.2, .2)
inset_ax.set_ylim(-.005, .01)
# set axis tick locations
inset_ax.set_yticks([0, 0.005, 0.01])
inset_ax.set_xticks([-0.1,0,.1]);
alpha = 0.7
phi_ext = 2 * np.pi * 0.5
def flux_qubit_potential(phi_m, phi_p):
return 2 + alpha - 2 * np.cos(phi_p) * np.cos(phi_m) - alpha * np.cos(phi_ext - 2*phi_p)
phi_m = np.linspace(0, 2*np.pi, 100)
phi_p = np.linspace(0, 2*np.pi, 100)
X,Y = np.meshgrid(phi_p, phi_m)
Z = flux_qubit_potential(X, Y).T
fig, ax = plt.subplots()
p = ax.pcolor(X/(2*np.pi), Y/(2*np.pi), Z, cmap=matplotlib.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max())
cb = fig.colorbar(p, ax=ax)
fig, ax = plt.subplots()
im = ax.imshow(Z, cmap=matplotlib.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max(), extent=[0, 1, 0, 1])
im.set_interpolation('bilinear')
cb = fig.colorbar(im, ax=ax)
fig, ax = plt.subplots()
cnt = ax.contour(Z, cmap=matplotlib.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max(), extent=[0, 1, 0, 1])
from mpl_toolkits.mplot3d.axes3d import Axes3D
fig = plt.figure(figsize=(14,6))
# `ax` is a 3D-aware axis instance because of the projection='3d' keyword argument to add_subplot
ax = fig.add_subplot(1, 2, 1, projection='3d')
p = ax.plot_surface(X, Y, Z, rstride=4, cstride=4, linewidth=0)
# surface_plot with color grading and color bar
ax = fig.add_subplot(1, 2, 2, projection='3d')
p = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=matplotlib.cm.coolwarm, linewidth=0, antialiased=False)
cb = fig.colorbar(p, shrink=0.5)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(1, 1, 1, projection='3d')
p = ax.plot_wireframe(X, Y, Z, rstride=4, cstride=4)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(1,1,1, projection='3d')
ax.plot_surface(X, Y, Z, rstride=4, cstride=4, alpha=0.25)
cset = ax.contour(X, Y, Z, zdir='z', offset=-np.pi, cmap=matplotlib.cm.coolwarm)
cset = ax.contour(X, Y, Z, zdir='x', offset=-np.pi, cmap=matplotlib.cm.coolwarm)
cset = ax.contour(X, Y, Z, zdir='y', offset=3*np.pi, cmap=matplotlib.cm.coolwarm)
ax.set_xlim3d(-np.pi, 2*np.pi);
ax.set_ylim3d(0, 3*np.pi);
ax.set_zlim3d(-np.pi, 2*np.pi);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'sandbox-3', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: nbinteract.multiple_choice
Step2: nbinteract.short_answer
|
<ASSISTANT_TASK:>
Python Code:
import nbinteract as nbi
nbi.multiple_choice(question="What is 10 + 2 * 5?",
choices=['12', '60', '20'],
answers=2)
nbi.multiple_choice(question="Select all prime numbers.",
choices=['12', '3', '31'],
answers=[1, 2])
nbi.short_answer('What is 1+1?', answers='2', explanation='1+1 is 2')
nbi.short_answer('Enter the first name of a member of the Beatles.',
['John', 'Paul', 'George', 'Ringo'])
nbi.short_answer('Enter an even number.', lambda x: int(x) % 2 == 0)
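# Any callable returning a boolean works as the answer checker, so
# arbitrary validation logic is possible (a sketch following the call above):
nbi.short_answer('Enter a multiple of 3.', lambda x: int(x) % 3 == 0)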
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sections
Step2: Splitting data into 70% training and 30% test data
Step3: Standardizing the features
Step5: Training a perceptron model using the standardized training data
Step7: Plot cost function
Step8: Regularization path
|
<ASSISTANT_TASK:>
Python Code:
%load_ext watermark
%watermark -a 'Sebastian Raschka' -u -d -v -p numpy,pandas,matplotlib,scikit-learn
# to install watermark just uncomment the following line:
#%install_ext https://raw.githubusercontent.com/rasbt/watermark/master/watermark.py
from sklearn import datasets
import numpy as np
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
print('Class labels:', np.unique(y))
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
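# Sanity check: after standardization the training features should have
# mean ~0 and standard deviation ~1 (the fitted scaler only approximately
# centers the test set):
print('means:', X_train_std.mean(axis=0))
print('stds: ', X_train_std.std(axis=0))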
from sklearn.linear_model import Perceptron
ppn = Perceptron(n_iter=40, eta0=0.1, random_state=0)
ppn.fit(X_train_std, y_train)
y_test.shape
y_pred = ppn.predict(X_test_std)
print('Misclassified samples: %d' % (y_test != y_pred).sum())
from sklearn.metrics import accuracy_score
print('Accuracy: %.2f' % accuracy_score(y_test, y_pred))
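# Equivalently, every scikit-learn classifier exposes a score method that
# computes the accuracy directly:
print('Accuracy: %.2f' % ppn.score(X_test_std, y_test))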
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
%matplotlib inline
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
    # plot class samples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
alpha=0.8, c=cmap(idx),
marker=markers[idx], label=cl)
# highlight test samples
if test_idx:
X_test, y_test = X[test_idx, :], y[test_idx]
plt.scatter(X_test[:, 0], X_test[:, 1], c='',
alpha=1.0, linewidth=1, marker='o',
s=55, label='test set')
%matplotlib inline
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X=X_combined_std, y=y_combined,
classifier=ppn, test_idx=range(105,150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
# plt.savefig('./figures/iris_perceptron_scikit.png', dpi=300)
plt.show()
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
z = np.arange(-7, 7, 0.1)
phi_z = sigmoid(z)
plt.plot(z, phi_z)
plt.axvline(0.0, color='k')
plt.ylim(-0.1, 1.1)
plt.xlabel('z')
plt.ylabel('$\phi (z)$')
# y axis ticks and gridline
plt.yticks([0.0, 0.5, 1.0])
ax = plt.gca()
ax.yaxis.grid(True)
plt.tight_layout()
# plt.savefig('./figures/sigmoid.png', dpi=300)
plt.show()
def cost_1(z):
return - np.log(sigmoid(z))
def cost_0(z):
return - np.log(1 - sigmoid(z))
z = np.arange(-10, 10, 0.1)
phi_z = sigmoid(z)
c1 = [cost_1(x) for x in z]
plt.plot(phi_z, c1, label='J(w) if y=1')
c0 = [cost_0(x) for x in z]
plt.plot(phi_z, c0, linestyle='--', label='J(w) if y=0')
plt.ylim(0.0, 5.1)
plt.xlim([0, 1])
plt.xlabel('$\phi$(z)')
plt.ylabel('J(w)')
plt.legend(loc='best')
plt.tight_layout()
# plt.savefig('./figures/log_cost.png', dpi=300)
plt.show()
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined,
classifier=lr, test_idx=range(105,150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
# plt.savefig('./figures/logistic_regression.png', dpi=300)
plt.show()
lr.predict_proba(X_test_std[0, :].reshape(1, -1))
weights, params = [], []
for c in np.arange(-5, 5):
lr = LogisticRegression(C=10**c, random_state=0)
lr.fit(X_train_std, y_train)
weights.append(lr.coef_[1])
params.append(10**c)
weights = np.array(weights)
plt.plot(params, weights[:, 0],
label='petal length')
plt.plot(params, weights[:, 1], linestyle='--',
label='petal width')
plt.ylabel('weight coefficient')
plt.xlabel('C')
plt.legend(loc='upper left')
plt.xscale('log')
# plt.savefig('./figures/regression_path.png', dpi=300)
plt.show()
from sklearn.svm import SVC
svm = SVC(kernel='linear', C=1.0, random_state=0)
svm.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined,
classifier=svm, test_idx=range(105,150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
# plt.savefig('./figures/support_vector_machine_linear.png', dpi=300)
plt.show()
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
np.random.seed(0)
X_xor = np.random.randn(200, 2)
y_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0)
y_xor = np.where(y_xor, 1, -1)
plt.scatter(X_xor[y_xor==1, 0], X_xor[y_xor==1, 1], c='b', marker='x', label='1')
plt.scatter(X_xor[y_xor==-1, 0], X_xor[y_xor==-1, 1], c='r', marker='s', label='-1')
plt.xlim([-3, 3])
plt.ylim([-3, 3])
plt.legend(loc='best')
plt.tight_layout()
# plt.savefig('./figures/xor.png', dpi=300)
plt.show()
svm = SVC(kernel='rbf', random_state=0, gamma=0.10, C=10.0)
svm.fit(X_xor, y_xor)
plot_decision_regions(X_xor, y_xor,
classifier=svm)
plt.legend(loc='upper left')
plt.tight_layout()
# plt.savefig('./figures/support_vector_machine_rbf_xor.png', dpi=300)
plt.show()
from sklearn.svm import SVC
svm = SVC(kernel='rbf', random_state=0, gamma=0.2, C=1.0)
svm.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined,
classifier=svm, test_idx=range(105,150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
# plt.savefig('./figures/support_vector_machine_rbf_iris_1.png', dpi=300)
plt.show()
svm = SVC(kernel='rbf', random_state=0, gamma=100.0, C=1.0)
svm.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined,
classifier=svm, test_idx=range(105,150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
# plt.savefig('./figures/support_vector_machine_rbf_iris_2.png', dpi=300)
plt.show()
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(criterion='entropy', max_depth=3, random_state=0)
tree.fit(X_train, y_train)
X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X_combined, y_combined,
classifier=tree, test_idx=range(105,150))
plt.xlabel('petal length [cm]')
plt.ylabel('petal width [cm]')
plt.legend(loc='upper left')
plt.tight_layout()
# plt.savefig('./figures/decision_tree_decision.png', dpi=300)
plt.show()
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
def gini(p):
return (p)*(1 - (p)) + (1-p)*(1 - (1-p))
def entropy(p):
return - p*np.log2(p) - (1 - p)*np.log2((1 - p))
def error(p):
return 1 - np.max([p, 1 - p])
x = np.arange(0.0, 1.0, 0.01)
ent = [entropy(p) if p != 0 else None for p in x]
sc_ent = [e*0.5 if e else None for e in ent]
err = [error(i) for i in x]
fig = plt.figure()
ax = plt.subplot(111)
for i, lab, ls, c in zip([ent, sc_ent, gini(x), err],
['Entropy', 'Entropy (scaled)',
'Gini Impurity', 'Misclassification Error'],
['-', '-', '--', '-.'],
['black', 'lightgray', 'red', 'green', 'cyan']):
line = ax.plot(x, i, label=lab, linestyle=ls, lw=2, color=c)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15),
ncol=3, fancybox=True, shadow=False)
ax.axhline(y=0.5, linewidth=1, color='k', linestyle='--')
ax.axhline(y=1.0, linewidth=1, color='k', linestyle='--')
plt.ylim([0, 1.1])
plt.xlabel('p(i=1)')
plt.ylabel('Impurity Index')
plt.tight_layout()
plt.savefig('./figures/impurity.png', dpi=300, bbox_inches='tight')
plt.show()
from sklearn.tree import export_graphviz
export_graphviz(tree,
out_file='tree.dot',
feature_names=['petal length', 'petal width'])
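# The exported .dot file can be rendered to an image with the GraphViz
# command-line tools (assumed to be installed separately):
# dot -Tpng tree.dot -o tree.png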
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(criterion='entropy',
n_estimators=10,
random_state=1,
n_jobs=2)
forest.fit(X_train, y_train)
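# Random forests also expose an impurity-based feature importance measure,
# averaged over all trees in the ensemble:
print('Feature importances:', forest.feature_importances_)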
plot_decision_regions(X_combined, y_combined,
classifier=forest, test_idx=range(105,150))
plt.xlabel('petal length [cm]')
plt.ylabel('petal width [cm]')
plt.legend(loc='upper left')
plt.tight_layout()
# plt.savefig('./figures/random_forest.png', dpi=300)
plt.show()
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski')
knn.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined,
classifier=knn, test_idx=range(105,150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
# plt.savefig('./figures/k_nearest_neighbors.png', dpi=300)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What we're doing is creating two vectors, [1.0, 1.0, 1.0, 1.0] and [2.0, 2.0, 2.0, 2.0], and then adding them. Here's equivalent code in raw Python and using numpy
Step2: Details of adding two vectors in TensorFlow
Step3: This version uses constant in a way similar to numpy's fill, specifying the optional shape and having the values copied out across it.
Step4: Adding two matrices
Step5: Recall that you can pass numpy or Python arrays into constant.
Step6: Above, we're taking a 1 x 4 vector [1 0 0 1] and multiplying it by a 4 by 2 matrix full of random values from a normal distribution (mean 0, stdev 1). The output is a 1 x 2 matrix.
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
with tf.Session():
input1 = tf.constant([1.0, 1.0, 1.0, 1.0])
input2 = tf.constant([2.0, 2.0, 2.0, 2.0])
output = tf.add(input1, input2)
result = output.eval()
    print(result)
print([x + y for x, y in zip([1.0] * 4, [2.0] * 4)])
import numpy as np
x, y = np.full(4, 1.0), np.full(4, 2.0)
print("{} + {} = {}".format(x, y, x + y))
import tensorflow as tf
with tf.Session():
input1 = tf.constant(1.0, shape=[4])
input2 = tf.constant(2.0, shape=[4])
input3 = tf.constant(3.0, shape=[4])
output = tf.add(tf.add(input1, input2), input3)
result = output.eval()
    print(result)
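# TensorFlow also provides tf.add_n, which sums a list of tensors in a
# single op (same TF 1.x session style as above):
with tf.Session():
    inputs = [tf.constant(c, shape=[4]) for c in (1.0, 2.0, 3.0)]
    print(tf.add_n(inputs).eval())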
with tf.Session():
input1 = tf.constant(1.0, shape=[4])
input2 = tf.constant(2.0, shape=[4])
output = input1 + input2
    print(output.eval())
import tensorflow as tf
import numpy as np
with tf.Session():
input1 = tf.constant(1.0, shape=[2, 3])
input2 = tf.constant(np.reshape(np.arange(1.0, 7.0, dtype=np.float32), (2, 3)))
output = tf.add(input1, input2)
    print(output.eval())
#@test {"output": "ignore"}
import tensorflow as tf
import numpy as np
with tf.Session():
input_features = tf.constant(np.reshape([1, 0, 0, 1], (1, 4)).astype(np.float32))
weights = tf.constant(np.random.randn(4, 2).astype(np.float32))
output = tf.matmul(input_features, weights)
    print("Input:")
    print(input_features.eval())
    print("Weights:")
    print(weights.eval())
    print("Output:")
    print(output.eval())
#@test {"output": "ignore"}
import tensorflow as tf
import numpy as np
with tf.Session() as sess:
# Set up two variables, total and weights, that we'll change repeatedly.
total = tf.Variable(tf.zeros([1, 2]))
weights = tf.Variable(tf.random_uniform([1,2]))
# Initialize the variables we defined above.
    tf.global_variables_initializer().run()
# This only adds the operators to the graph right now. The assignment
# and addition operations are not performed yet.
update_weights = tf.assign(weights, tf.random_uniform([1, 2], -1.0, 1.0))
update_total = tf.assign(total, tf.add(total, weights))
for _ in range(5):
# Actually run the operation graph, so randomly generate weights and then
# add them into the total. Order does matter here. We need to update
# the weights before updating the total.
sess.run(update_weights)
sess.run(update_total)
        print(weights.eval(), total.eval())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2.1 Analytic results
Step2: 2.2 Numerical methods
Step3: Example usage
Step4: We can display the value and confirm that the algorithm did indeed converge as follows.
Step5: 2.1.1 Comparing the various methods
Step6: ...however the brentq and brenth routines are generally more efficient.
|
<ASSISTANT_TASK:>
Python Code:
import solowpy
# define model parameters
ces_params = {'A0': 1.0, 'L0': 1.0, 'g': 0.02, 'n': 0.03, 's': 0.15,
'delta': 0.05, 'alpha': 0.33, 'sigma': 0.95}
# create an instance of the solowpy.CESModel class
ces_model = solowpy.CESModel(params=ces_params)
# check the docstring...
ces_model.steady_state?
ces_model.steady_state
solowpy.Model.find_steady_state?
k_star, result = ces_model.find_steady_state(1e-6, 1e6, method='bisect', full_output=True)
print("The steady-state value is {}".format(k_star))
print("Did the bisection algorithm coverge? {}".format(result.converged))
valid_methods = ['brenth', 'brentq', 'ridder', 'bisect']
for method in valid_methods:
actual_ss = ces_model.find_steady_state(1e-6, 1e6, method=method)
expected_ss = ces_model.steady_state
print("Steady state value computed using {} is {}".format(method, actual_ss))
print("Absolute error in is {}\n".format(abs(actual_ss - expected_ss)))
valid_methods = ['brenth', 'brentq', 'ridder', 'bisect']
for method in valid_methods:
print("Profiling results using {}:".format(method))
%timeit -n 1 -r 3 ces_model.find_steady_state(1e-6, 1e6, method=method)
print("")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Compute scores
Step2: Plot
Step3: The resulting plots reveal a low level of activity, with a GC-rich motif driving the only signal.
|
<ASSISTANT_TASK:>
Python Code:
if not os.path.isfile('data/hg19.ml.fa'):
subprocess.call('curl -o data/hg19.ml.fa https://storage.googleapis.com/basenji_tutorial_data/hg19.ml.fa', shell=True)
subprocess.call('curl -o data/hg19.ml.fa.fai https://storage.googleapis.com/basenji_tutorial_data/hg19.ml.fa.fai', shell=True)
if not os.path.isdir('models/heart'):
os.mkdir('models/heart')
if not os.path.isfile('models/heart/model_best.h5'):
subprocess.call('curl -o models/heart/model_best.h5 https://storage.googleapis.com/basenji_tutorial_data/model_best.h5', shell=True)
lines = [['index','identifier','file','clip','sum_stat','description']]
lines.append(['0', 'CNhs11760', 'data/CNhs11760.bw', '384', 'sum', 'aorta'])
lines.append(['1', 'CNhs12843', 'data/CNhs12843.bw', '384', 'sum', 'artery'])
lines.append(['2', 'CNhs12856', 'data/CNhs12856.bw', '384', 'sum', 'pulmonic_valve'])
samples_out = open('data/heart_wigs.txt', 'w')
for line in lines:
print('\t'.join(line), file=samples_out)
samples_out.close()
! basenji_sat_bed.py -f data/hg19.ml.fa -l 200 -o output/gata4_sat --rc -t data/heart_wigs.txt models/params_small.json models/heart/model_best.h5 data/gata4.bed
! basenji_sat_plot.py --png -l 200 -o output/gata4_sat/plots -t data/heart_wigs.txt output/gata4_sat/scores.h5
! ls output/gata4_sat/plots
IFrame('output/gata4_sat/plots/seq0_t0.png', width=1200, height=400)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we will start with normalization of the features, because the size of the house is in a different range than the number of bedrooms
Step2: Data Preparation
Step3: Now let's predict the prices of some houses and compare the result with the scikit-learn prediction.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
df = pd.read_csv('ex1data2.txt', header=None)
print(df.head())
# Let's try to visualize the data
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(df[0], df[1], df[2])
ax.set_zlabel('price')
plt.xlabel('size of the house (in square feet)')
plt.ylabel('number of bedrooms')
plt.show()
print('We have data for 47 houses')
import numpy as np
#Data preparation
# We are not adding the column of ones here because we want to normalize the features first
X = df.drop([2], axis=1).values
y = df[2].values
print(X[:1], y[:1])
def featureNormalize(X):
mu = X.mean(axis=0)
sigma = X.std(axis=0)
X_norm = (X - mu)/sigma
return (X_norm, mu, sigma)
X_norm, mu, sigma = featureNormalize(X)
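# Sanity check: each normalized feature column should now have mean ~0
# and standard deviation ~1:
print(X_norm.mean(axis=0), X_norm.std(axis=0))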
# now let's add a column of ones to the input features X for theta0
ones = np.ones((X_norm.shape[0], 1), float)
X = np.concatenate((ones,X_norm), axis=1)
print(X[:1])
#Cost function
def computeCostMulti(X, y, theta):
m = X.shape[0]
hypothesis = X.dot(theta) # h_theta = theta.T * x = theta0*x0 + theta1*x1 + ... + thetan*xn
J = (1/(2*m)) * (np.sum(np.square(hypothesis-y)))
return J
theta = np.zeros(X.shape[1])
J_cost = computeCostMulti(X, y, theta)
print('J_Cost', J_cost)
def gradientDescentMulti(X, y, theta, alpha, num_iters):
m = X.shape[0]
J_history = np.zeros(num_iters)
for iter in np.arange(num_iters):
h = X.dot(theta)
theta = theta - alpha * (1/m) * X.T.dot(h-y)
J_history[iter] = computeCostMulti(X, y, theta)
return theta, J_history
alpha = 0.01;
num_iters = 1000;
theta, J_history = gradientDescentMulti(X, y, theta, alpha, num_iters)
# Let's plot the cost history
plt.xlim(0,num_iters)
plt.plot(J_history)
plt.ylabel('Cost J')
plt.xlabel('Iterations')
plt.show()
print(theta)
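# Cross-check (a sketch): the closed-form normal equation should yield
# (nearly) the same parameters as gradient descent on this small problem.
theta_ne = np.linalg.pinv(X.T.dot(X)).dot(X.T.dot(y))
print('normal equation theta:', theta_ne)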
from sklearn.linear_model import LinearRegression
clf = LinearRegression()
clf.fit(X, y)
# NOTE: these inputs are not scaled with the training mu/sigma, so the
# predicted prices are not meaningful house prices; both models receive
# the same input, though, so the comparison between them is still valid.
inputXs = np.array([[1, 100, 3], [1, 200, 3]])
sklearnPrediction = clf.predict(inputXs)
gradientDescentPrediction = inputXs.dot(theta)
print(sklearnPrediction, gradientDescentPrediction)
print("Looks Good :D")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import SHL Prediction Module
Step2: shl_sm parameters
Step3: shl_pm Initialization
Step4: MISC - Validation
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import shl_pm
## which month to predict/simulate?
# shl_sm_parm_ccyy_mm = '2017-04'
# shl_sm_parm_ccyy_mm_offset = 1647
# shl_sm_parm_ccyy_mm = '2017-05'
# shl_sm_parm_ccyy_mm_offset = 1708
shl_sm_parm_ccyy_mm = '2017-06'
shl_sm_parm_ccyy_mm_offset = 1769
# shl_sm_parm_ccyy_mm = '2017-07'
# shl_sm_parm_ccyy_mm_offset = 1830
# shl_sm_parm_ccyy_mm = '2017-08'
# shl_sm_parm_ccyy_mm_offset = 1830+61
# shl_sm_parm_ccyy_mm = '2017-09'
# shl_sm_parm_ccyy_mm_offset = 1830+61*2
# shl_sm_parm_ccyy_mm = '2017-10'
# shl_sm_parm_ccyy_mm_offset = 1830+61*3
# shl_sm_parm_ccyy_mm = '2017-11'
# shl_sm_parm_ccyy_mm_offset = 1830+61*4
# shl_sm_parm_ccyy_mm = '2017-12'
# shl_sm_parm_ccyy_mm_offset = 1830+61*5
#----------------------------------
shl_sm_data = pd.read_csv('shl_sm_data/shl_sm_data.csv')
shl_sm_data
shl_pm.shl_initialize(shl_sm_parm_ccyy_mm)
# Upon receiving each per-second price from 11:29:00 through 11:29:49, predict one step forward <- one-step-ahead price forecasting
for i in range(shl_sm_parm_ccyy_mm_offset, shl_sm_parm_ccyy_mm_offset+50): # use csv data as simulation
# for i in range(shl_sm_parm_ccyy_mm_offset, shl_sm_parm_ccyy_mm_offset+55): # use csv data as simulation
print('\n<<<< Record No.: %5d >>>>' % i)
print(shl_sm_data['ccyy-mm'][i]) # format: ccyy-mm
print(shl_sm_data['time'][i]) # format: hh:mm:ss
print(shl_sm_data['bid-price'][i]) # format: integer
######################################################################################################################
# call prediction function, returned result is in 'list' format, i.e. [89400]
shl_sm_prediction_list_local_1 = shl_pm.shl_predict_price_k_step(shl_sm_data['time'][i], shl_sm_data['bid-price'][i],1) # <- one-step forward price forecasting
print(shl_sm_prediction_list_local_1)
######################################################################################################################
# Upon receiving the 11:29:50 price, predict through 11:30:00 <- ten-step-ahead price forecasting
for i in range(shl_sm_parm_ccyy_mm_offset+50, shl_sm_parm_ccyy_mm_offset+51): # use csv data as simulation
print('\n<<<< Record No.: %5d >>>>' % i)
print(shl_sm_data['ccyy-mm'][i]) # format: ccyy-mm
print(shl_sm_data['time'][i]) # format: hh:mm:ss
print(shl_sm_data['bid-price'][i]) # format: integer/boost-trap-float
######################################################################################################################
# call prediction function, returned result is in 'list' format, i.e. [89400, 89400, 89400, 89500, 89500, 89500, 89500, 89600, 89600, 89600]
shl_sm_prediction_list_local_k = shl_pm.shl_predict_price_k_step(shl_sm_data['time'][i], shl_sm_data['bid-price'][i],10) # <- ten-step forward price forecasting
print(shl_sm_prediction_list_local_k)
######################################################################################################################
print(shl_sm_prediction_list_local_k)
shl_pm.shl_data_pm_1_step.tail(11)
shl_pm.shl_data_pm_k_step.tail(20)
%matplotlib inline
import matplotlib.pyplot as plt
shl_data_pm_k_step_local = shl_pm.shl_data_pm_k_step.copy()
shl_data_pm_k_step_local.index = shl_data_pm_k_step_local.index + 1
shl_data_pm_k_step_local
# bid is predicted bid-price from shl_pm
plt.figure(figsize=(12,6))
plt.plot(shl_pm.shl_data_pm_k_step['f_current_bid'])
# plt.plot(shl_data_pm_1_step_k_step['f_1_step_pred_price'].shift(1))
plt.plot(shl_data_pm_k_step_local['f_1_step_pred_price'])
# bid is actual bid-price from raw dataset
shl_data_actual_bid_local = shl_sm_data[shl_sm_parm_ccyy_mm_offset:shl_sm_parm_ccyy_mm_offset+61].copy()
shl_data_actual_bid_local.reset_index(inplace=True)
plt.figure(figsize=(12,6))
plt.plot(shl_data_actual_bid_local['bid-price'])
plt.plot(shl_data_pm_k_step_local['f_1_step_pred_price'])
plt.figure(figsize=(12,6))
plt.plot(shl_data_actual_bid_local['bid-price'])
plt.plot(shl_data_pm_k_step_local['f_1_step_pred_price_rounded'])
plt.plot(shl_data_pm_k_step_local['f_1_step_pred_set_price_rounded'])
print('Dynamic Increment : +%d' % shl_pm.shl_global_parm_dynamic_increment)
# pd.concat([shl_data_actual_bid_local['bid-price'], shl_data_pm_k_step_local['f_1_step_pred_price'], shl_data_pm_k_step_local['f_1_step_pred_price'] - shl_data_actual_bid_local['bid-price']], axis=1, join='inner')
pd.concat([shl_data_actual_bid_local['bid-price'].tail(11), shl_data_pm_k_step_local['f_1_step_pred_price'].tail(11), shl_data_pm_k_step_local['f_1_step_pred_price'].tail(11) - shl_data_actual_bid_local['bid-price'].tail(11)], axis=1, join='inner')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot $E_n$ vs $\theta$
Step2: Divide by experimental
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)
import pandas as pd
os.chdir('..')
os.getcwd()
sys.path.append('../scripts/')
import bicorr_plot as bicorr_plot
import bicorr_math as bicorr_math
os.listdir('cgmf/datap/')
Esum_df_meas = pd.read_csv(r'Cf072115_to_Cf072215b/datap/Esum_df.csv',index_col=0)
Esum_df_cgmf = pd.read_csv(r'cgmf/datap/Esum_df.csv',index_col=0)
Esum_df_freya= pd.read_csv(r'freya/datap/Esum_df.csv',index_col=0)
Esum_df_ipol = pd.read_csv(r'ipol/datap/Esum_df.csv',index_col=0)
Esum_df_ipol_noct = pd.read_csv(r'ipol_noct/datap/Esum_df.csv',index_col=0)
Esum_df_ipol
Esum_dfs = [Esum_df_meas, Esum_df_cgmf, Esum_df_freya, Esum_df_ipol, Esum_df_ipol_noct]
legends =['Experiment', 'CGMF', 'FREYA', 'PoliMi', 'PoliMi-No CT']
fmts = ['x', 's', 'D', 'o', '^']
colors = ['#5d269b', '#dd673b', '#80bc31', '#3cbfe0', '#4242f4']
to_plot = [0,1, 2, 3]
line_thickness = 1
ebar_width = 3
fig = plt.figure(figsize=(4,4))
ax = plt.gca()
for i in to_plot:
Esum_df = Esum_dfs[i]
ax.errorbar(Esum_df['th_bin_center'],
Esum_df['Eave'],
yerr=Esum_df['Eave_err'],
fmt=fmts[i],
markeredgewidth=1,
markerfacecolor='none',
elinewidth=line_thickness,
capthick = line_thickness,
capsize = ebar_width,
c=colors[i])
leg = plt.legend([legends[i] for i in to_plot])
leg.get_frame().set_edgecolor('w')
ax.axvspan(0,20,facecolor='gray', alpha=0.2)
ax.set_xlabel('Angle (degrees)')
ax.set_ylabel('$\overline{E_n}$ (MeV)')
ax.set_xlim([0,180])
# ax.set_ylim([2.5,3.3])
# Set up ticks
ax.tick_params(axis='both',
which='major',
direction='inout',
length=6,
color='k',
bottom=True, right=True, top=True, left=True)
ax.tick_params(axis='both',
which='minor',
direction='in',
length=3,
bottom=True, right=True, top=True, left=True)
# Major
ax.xaxis.set_major_locator(MultipleLocator(45))
ax.yaxis.set_major_locator(MultipleLocator(.05))
# Minor
ax.xaxis.set_minor_locator(MultipleLocator(15))
ax.yaxis.set_minor_locator(MultipleLocator(0.025))
ax.text(30,2.42,'(a)', size=15, backgroundcolor='white')
plt.tight_layout()
bicorr_plot.save_fig_to_folder('Esum_vs_angle_compare',r'compare/fig')
Esum_exp = Esum_dfs[0]
to_plot = [1,2,3]
fig = plt.figure(figsize=(4,4))
ax = plt.gca()
for i in to_plot:
Esum_df = Esum_dfs[i]
x = Esum_df['th_bin_center']
# y = Esum_df['Eave']/Esum_exp['Eave']
y, yerr = bicorr_math.prop_err_division(Esum_df['Eave'],Esum_df['Eave_err'],
Esum_exp['Eave'],Esum_exp['Eave_err'])
ax.errorbar(x,
y,
yerr=yerr,
fmt=fmts[i],
markeredgewidth=1,
markerfacecolor='none',
elinewidth=line_thickness,
capthick = line_thickness,
capsize = ebar_width,
c=colors[i])
leg = plt.legend([legends[i] for i in to_plot],loc=9)
leg.get_frame().set_edgecolor('w')
plt.axhline(1.0,color='gray', linewidth=1,linestyle='--')
ax.axvspan(0,20,facecolor='gray', alpha=0.2)
ax.set_xlabel('Angle (degrees)')
ax.set_ylabel(r'$\left[\overline{E_n}\right]_{SIM} / \left[\overline{E_n}\right]_{EXP}$')
#plt.ylabel(r'$\Big[ \overline{E_n} \Big]_{\texttt{EXP}}$')
ax.set_xlim([0,180])
# ax.set_ylim([2.5,3.3])
# Set up ticks
ax.tick_params(axis='both',
which='major',
direction='inout',
length=6,
color='k',
bottom=True, right=True, top=True, left=True)
ax.tick_params(axis='both',
which='minor',
direction='in',
length=3,
bottom=True, right=True, top=True, left=True)
# Major
ax.xaxis.set_major_locator(MultipleLocator(45))
ax.yaxis.set_major_locator(MultipleLocator(.02))
# Minor
ax.xaxis.set_minor_locator(MultipleLocator(15))
ax.yaxis.set_minor_locator(MultipleLocator(0.005))
ax.text(30,1.02,'(b)', size=15, backgroundcolor='white')
plt.tight_layout()
bicorr_plot.save_fig_to_folder('Esum_vs_angle_diff',r'compare/fig')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Constant-coefficient Poisson equation
Step2: Next, we initialize the RHS. To make life easier, the CellCenterMG2d object has the coordinates of the solution grid (including ghost cells) as mg.x2d and mg.y2d (these are two-dimensional arrays).
Step3: The last setup step is to initialize the solution--this is the starting point for the solve. Usually we just want to start with all zeros, so we use the init_zeros() method
Step4: We can now solve -- there are actually two different techniques we can use here. We can do pure smoothing on the solution grid using mg.smooth(mg.nlevels-1, N), where N is the number of smoothing iterations. To get the solution, N will need to be large and this will take a long time.
Step5: We can access the solution on the finest grid using get_solution()
Step6: We can also get the gradient of the solution
Step7: General linear elliptic equation
Step8: For reference, we'll define a function providing the analytic solution
Step9: Now the coefficients--note that since $\gamma$ is a vector, we have a different function for each component
Step10: and the righthand side function
Step11: Our inhomogeneous boundary conditions require a function that can be evaluated on the boundary to give the value
Step12: Now we can set up our grid object and the coefficients, which are stored as a CellCenterData2d object. Note that the coefficients do not need to have the same boundary conditions as $\phi$ (and for real problems, they may not). The one that matters most is $\beta$, since it will need to be averaged to the edges of the domain, so the boundary conditions on the coefficients are important.
Step13: Now we can set up the multigrid object
Step14: just as before, we specify the righthand side and initialize the solution
Step15: and we can solve it
Step16: We can compare to the true solution
Step17: The norm of the error is
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
from __future__ import print_function
import numpy as np
import mesh.boundary as bnd
import mesh.patch as patch
import multigrid.MG as MG
nx = ny = 256
mg = MG.CellCenterMG2d(nx, ny,
xl_BC_type="dirichlet", xr_BC_type="dirichlet",
yl_BC_type="dirichlet", yr_BC_type="dirichlet", verbose=1)
def rhs(x, y):
return -2.0*((1.0-6.0*x**2)*y**2*(1.0-y**2) + (1.0-6.0*y**2)*x**2*(1.0-x**2))
mg.init_RHS(rhs(mg.x2d, mg.y2d))
mg.init_zeros()
mg.solve()
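# Alternative mentioned above: pure smoothing on the finest grid via
# mg.smooth(mg.nlevels-1, N). This converges far more slowly than the
# full multigrid solve, so it is left commented out:
# mg.smooth(mg.nlevels - 1, 10000)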
phi = mg.get_solution()
plt.imshow(np.transpose(phi.v()), origin="lower")
gx, gy = mg.get_solution_gradient()
plt.subplot(121)
plt.imshow(np.transpose(gx.v()), origin="lower")
plt.subplot(122)
plt.imshow(np.transpose(gy.v()), origin="lower")
import multigrid.general_MG as gMG
def true(x,y):
return np.cos(np.pi*x/2.0)*np.cos(np.pi*y/2.0)
def alpha(x,y):
return 10.0*np.ones_like(x)
def beta(x,y):
return x*y + 1.0
def gamma_x(x,y):
return np.ones_like(x)
def gamma_y(x,y):
return np.ones_like(x)
def f(x,y):
return -0.5*np.pi*(x + 1.0)*np.sin(np.pi*y/2.0)*np.cos(np.pi*x/2.0) - \
0.5*np.pi*(y + 1.0)*np.sin(np.pi*x/2.0)*np.cos(np.pi*y/2.0) + \
(-np.pi**2*(x*y+1.0)/2.0 + 10.0)*np.cos(np.pi*x/2.0)*np.cos(np.pi*y/2.0)
def xl_func(y):
return np.cos(np.pi*y/2.0)
def yl_func(x):
return np.cos(np.pi*x/2.0)
import mesh.patch as patch
nx = ny = 128
g = patch.Grid2d(nx, ny, ng=1)
d = patch.CellCenterData2d(g)
bc_c = bnd.BC(xlb="neumann", xrb="neumann",
ylb="neumann", yrb="neumann")
d.register_var("alpha", bc_c)
d.register_var("beta", bc_c)
d.register_var("gamma_x", bc_c)
d.register_var("gamma_y", bc_c)
d.create()
a = d.get_var("alpha")
a[:,:] = alpha(g.x2d, g.y2d)
b = d.get_var("beta")
b[:,:] = beta(g.x2d, g.y2d)
gx = d.get_var("gamma_x")
gx[:,:] = gamma_x(g.x2d, g.y2d)
gy = d.get_var("gamma_y")
gy[:,:] = gamma_y(g.x2d, g.y2d)
a = gMG.GeneralMG2d(nx, ny,
xl_BC_type="dirichlet", yl_BC_type="dirichlet",
xr_BC_type="dirichlet", yr_BC_type="dirichlet",
xl_BC=xl_func,
yl_BC=yl_func,
coeffs=d,
verbose=1, vis=0, true_function=true)
a.init_zeros()
a.init_RHS(f(a.x2d, a.y2d))
a.solve(rtol=1.e-10)
v = a.get_solution()
b = true(a.x2d, a.y2d)
e = v - b
e.norm()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To add an element, we can use the append function on the list. To remove one, we can use the del statement on the index we want to discard. We can also replace elements inside a list.
Step2: Tuples
Step3: We can also access the data just like with a list.
Step4: Dictionary
Step5: We can also combine lists and tuples with a dictionary, as in the following example.
Step6: Sequences and Sets
|
<ASSISTANT_TASK:>
Python Code:
# Defining the contents of a list can be done in many ways.
# One of them is to declare the contents by placing them
# between two square brackets.
kotakota = ["Bandung", "Jakarta", "Surabaya"]
arahangin = ["Utara", "Barat", "Timur", "Selatan"]
print(kotakota[1],arahangin[-1])
for kota in kotakota:
print(kota)
for arah in arahangin:
print(arah)
for kota in kotakota:
for arah in arahangin:
print("Kabupaten/Kota",kota,arah)
# Accessing the data
print(kotakota[1])
print(arahangin[-1])
# Accessing a slice of the data
print(arahangin[0:2])
kotakota.append('London')
print(kotakota)
del arahangin[0]
print(arahangin)
arahangin[0] = 'pusat'
print(arahangin)
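# insert() places a new element at a specific position in the list:
kotakota.insert(1, 'Medan')
print(kotakota)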
# Declaring a tuple by listing its contents is done
# by placing the contents between two regular
# parentheses.
pohon = ("akar","batang","cabang","ranting","daun","buah","biji")
# Tuples are immutable, so the following operations would raise errors:
# pohon.append("kuncup")   # AttributeError: 'tuple' object has no attribute 'append'
# del pohon[0]             # TypeError: 'tuple' object doesn't support item deletion
# Accessing the data
print(pohon[1])
print(pohon[-2])
# Accessing a slice of the data
print(pohon[1:3])
print(pohon[0:-1])
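# Because tuples are immutable, to get a modifiable version we convert
# the tuple to a list first:
pohon_list = list(pohon)
pohon_list.append("kuncup")
print(pohon_list)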
# Declaring a dictionary can be done by declaring
# its contents between two curly braces.
# The contents of a dictionary are pairs: a "word" and its "meaning".
AM = {
'title' : 'Antimage',
'ras' : 'dark elf',
'atribut utama' : 'agility',
'kelas' : 'carry',
'max level' : 25,
'nama' : "Magina",
}
CM = {
'title' : 'Crystal Maiden',
'ras' : 'human',
    'atribut utama' : 'intelligence',
'kelas' : 'support',
'max level' : 25,
'nama' : "Rylai",
}
print("Two example heroes in the game Dota 2 are", AM['title'], "and", CM['title'])
print("Both are heroes from Warcraft III, named", AM['nama'], "and", CM['nama'])
for title, nama in AM.items():
    print('The %s of this hero is %s' % (title, nama))
heroes = [AM, CM]
for hero in heroes:
print('Hero',hero['title'])
for title, nama in hero.items():
        print('The %s of this hero is %s' % (title, nama))
print("======")
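# Dictionary entries can also be added or removed after creation
# (illustrative key and value):
AM['attack type'] = 'melee'
print(AM['attack type'])
del AM['attack type']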
# declaring objects for sequences
namahero = "Ironman"
avengers = ["Ironman", "Thor", "Captain America", "Hulk", \
"Hawk Eye", "Black Widow"]
weapon = ("Wealth", "Hammer", "Shield",\
          "Science", "Intelligence")
print(namahero[0:4])
print(avengers[0:4])
for i in range(4):
    print(avengers[i], "has the weapon", weapon[i])
# declaring an object for set-style membership tests
life = "love and friendship need time and money"
# test
print('shot' in life)
print('end' in life)
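# A genuine set object stores unique elements and also supports fast
# membership tests (sketch):
kata = set(life.split())
print(kata)
print('love' in kata)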
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install Bazel with Bazelisk
Step2: Build .aar files
|
<ASSISTANT_TASK:>
Python Code:
# Create folders
!mkdir -p '/android/sdk'
# Download and move android SDK tools to specific folders
!wget -q 'https://dl.google.com/android/repository/tools_r25.2.5-linux.zip'
!unzip 'tools_r25.2.5-linux.zip'
!mv '/content/tools' '/android/sdk'
# Copy paste the folder
!cp -r /android/sdk/tools /android/android-sdk-linux
# Download NDK, unzip and move contents
!wget 'https://dl.google.com/android/repository/android-ndk-r19c-linux-x86_64.zip'
!unzip 'android-ndk-r19c-linux-x86_64.zip'
!mv /content/android-ndk-r19c /content/ndk
!mv '/content/ndk' '/android'
# Copy paste the folder
!cp -r /android/ndk /android/android-ndk-r19c
# Remove .zip files
!rm 'tools_r25.2.5-linux.zip'
!rm 'android-ndk-r19c-linux-x86_64.zip'
# Make android ndk executable to all users
!chmod -R go=u '/android'
# Set and view environment variables
%env PATH = /usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/tools/node/bin:/tools/google-cloud-sdk/bin:/opt/bin:/android/sdk/tools:/android/sdk/platform-tools:/android/ndk
%env ANDROID_SDK_API_LEVEL=29
%env ANDROID_API_LEVEL=29
%env ANDROID_BUILD_TOOLS_VERSION=29.0.2
%env ANDROID_DEV_HOME=/android
%env ANDROID_NDK_API_LEVEL=21
%env ANDROID_NDK_FILENAME=android-ndk-r19c-linux-x86_64.zip
%env ANDROID_NDK_HOME=/android/ndk
%env ANDROID_NDK_URL=https://dl.google.com/android/repository/android-ndk-r19c-linux-x86_64.zip
%env ANDROID_SDK_FILENAME=tools_r25.2.5-linux.zip
%env ANDROID_SDK_HOME=/android/sdk
#%env ANDROID_HOME=/android/sdk
%env ANDROID_SDK_URL=https://dl.google.com/android/repository/tools_r25.2.5-linux.zip
#!echo $PATH
!export -p
# Install specific versions of sdk, tools etc.
!android update sdk --no-ui -a \
--filter tools,platform-tools,android-29,build-tools-29.0.2
# Download Latest version of Bazelisk
!wget https://github.com/bazelbuild/bazelisk/releases/latest/download/bazelisk-linux-amd64
# Make script executable
!chmod +x bazelisk-linux-amd64
# Adding to the path
!sudo mv bazelisk-linux-amd64 /usr/local/bin/bazel
# Extract bazel info
!bazel
# Clone TensorFlow Lite Support repository OR upload your custom folder to build
!git clone https://github.com/tensorflow/tflite-support.git
# Move into tflite-support folder
%cd /content/tflite-support/
!ls
#@title Select library. { display-mode: "form" }
library = 'Support library' #@param ["Support library", "Task Vision library", "Task Text library", "Task Audio library","Metadata library","C++ image_classifier","C++ image_objector","C++ image_segmenter","C++ image_embedder","C++ nl_classifier","C++ bert_nl_classifier", "C++ bert_question_answerer", "C++ metadata_extractor"]
print('You selected:', library)
if library == 'Support library':
library = '//tensorflow_lite_support/java:tensorflowlite_support.aar'
elif library == 'Task Vision library':
library = '//tensorflow_lite_support/java/src/java/org/tensorflow/lite/task/vision:task-library-vision'
elif library == 'Task Text library':
library = '//tensorflow_lite_support/java/src/java/org/tensorflow/lite/task/text:task-library-text'
elif library == 'Task Audio library':
library = '//tensorflow_lite_support/java/src/java/org/tensorflow/lite/task/audio:task-library-audio'
elif library == 'Metadata library':
library = '//tensorflow_lite_support/metadata/java:tensorflow-lite-support-metadata-lib'
elif library == 'C++ image_classifier':
library = '//tensorflow_lite_support/cc/task/vision:image_classifier'
elif library == 'C++ image_objector':
library = '//tensorflow_lite_support/cc/task/vision:image_objector'
elif library == 'C++ image_segmenter':
library = '//tensorflow_lite_support/cc/task/vision:image_segmenter'
elif library == 'C++ image_embedder':
library = '//tensorflow_lite_support/cc/task/vision:image_embedder'
elif library == 'C++ nl_classifier':
library = '//tensorflow_lite_support/cc/task/text/nlclassifier:nl_classifier'
elif library == 'C++ bert_nl_classifier':
library = '//tensorflow_lite_support/cc/task/text/nlclassifier:bert_nl_classifier'
elif library == 'C++ bert_question_answerer':
library = '//tensorflow_lite_support/cc/task/text/qa:bert_question_answerer'
elif library == 'C++ metadata_extractor':
library = '//tensorflow_lite_support/metadata/cc:metadata_extractor'
#@title Select platform(s). { display-mode: "form" }
platforms = 'arm64-v8a,armeabi-v7a' #@param ["arm64-v8a,armeabi-v7a","x86", "x86_64", "arm64-v8a", "armeabi-v7a","x86,x86_64,arm64-v8a,armeabi-v7a"]
print('You selected:', platforms)
# Build library
!bazel build \
--fat_apk_cpu='{platforms}' \
'{library}'
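# Illustrative follow-up (not in the original notebook): locate the built
# artifacts. The exact output path depends on the selected target, but Bazel
# typically places build outputs under the bazel-bin symlink.
!find bazel-bin -name "*.aar" -print 2>/dev/null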
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Classification
Step2: Explore the Data
Step5: Implement Preprocess Functions
Step8: One-hot encode
Step10: Randomize Data
Step12: Check Point
Step17: Build the network
Step20: Convolution and Max Pooling Layer
Step23: Flatten Layer
Step26: Fully-Connected Layer
Step29: Output Layer
Step33: Create Convolutional Model
Step36: Train the Neural Network
Step38: Show Stats
Step39: Hyperparameters
Step41: Train on a Single CIFAR-10 Batch
Step43: Fully Train the Model
Step46: Checkpoint
|
<ASSISTANT_TASK:>
Python Code:
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
# Use Floyd's cifar-10 dataset if present
floyd_cifar10_location = '/input/cifar-10/python.tar.gz'
if isfile(floyd_cifar10_location):
tar_gz_path = floyd_cifar10_location
else:
tar_gz_path = 'cifar-10-python.tar.gz'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(tar_gz_path):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
tar_gz_path,
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open(tar_gz_path) as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 4
sample_id = 107
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
def normalize(x):
    """
    Normalize a list of sample image data in the range of 0 to 1
    : x: List of image data. The image shape is (32, 32, 3)
    : return: Numpy array of normalized data
    """
# TODO: Implement Function
# Min-max scaling: zi = (xi - min(RGB)) / (max(RGB) - min(RGB)) => zi = (xi - 0) / (255 - 0)
return x / float(255)
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_normalize(normalize)
def one_hot_encode(x):
    """
    One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
    : x: List of sample Labels
    : return: Numpy array of one-hot encoded labels
    """
n_classes = 10
result = np.zeros([len(x), n_classes])
for i in range(0, len(x)):
one_hot = np.zeros(n_classes)
one_hot[x[i]] = 1
result[i] = one_hot
return result
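# Note (illustrative alternative, not part of the graded solution): the same
# encoding can be written in a single vectorized step as np.eye(10)[x],
# which selects rows of the identity matrix by label index.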
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_one_hot_encode(one_hot_encode)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
import tensorflow as tf
def neural_net_image_input(image_shape):
    """
    Return a Tensor for a batch of image input
    : image_shape: Shape of the images
    : return: Tensor for image input.
    """
# TODO: Implement Function
return tf.placeholder(tf.float32, shape=[None] + list(image_shape), name="x")
def neural_net_label_input(n_classes):
    """
    Return a Tensor for a batch of label input
    : n_classes: Number of classes
    : return: Tensor for label input.
    """
# TODO: Implement Function
return tf.placeholder(tf.int32, shape=[None, n_classes], name="y")
def neural_net_keep_prob_input():
    """
    Return a Tensor for keep probability
    : return: Tensor for keep probability.
    """
return tf.placeholder(tf.float32, name="keep_prob")
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
    """
    Apply convolution then max pooling to x_tensor
    :param x_tensor: TensorFlow Tensor
    :param conv_num_outputs: Number of outputs for the convolutional layer
    :param conv_ksize: kernel size 2-D Tuple for the convolutional layer
    :param conv_strides: Stride 2-D Tuple for convolution
    :param pool_ksize: kernel size 2-D Tuple for pool
    :param pool_strides: Stride 2-D Tuple for pool
    : return: A tensor that represents convolution and max pooling of x_tensor
    """
weights = tf.Variable(
tf.truncated_normal(
[conv_ksize[0], conv_ksize[1], x_tensor.get_shape().as_list()[-1], conv_num_outputs],
stddev=0.1,
seed=1
)
)
biases = tf.Variable(
tf.truncated_normal([conv_num_outputs], stddev=0.1, seed=1)
)
conv = tf.nn.conv2d(
x_tensor,
weights,
[1, conv_strides[0], conv_strides[1], 1],
"SAME"
)
conv = tf.add(conv, biases)
conv = tf.nn.relu(conv)
pool = tf.nn.max_pool(
conv,
[1, pool_ksize[0], pool_ksize[1],1 ],
[1, pool_strides[0], pool_strides[1], 1],
"SAME"
)
return pool
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_con_pool(conv2d_maxpool)
from functools import reduce
def flatten(x_tensor):
    """
    Flatten x_tensor to (Batch Size, Flattened Image Size)
    : x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
    : return: A tensor of size (Batch Size, Flattened Image Size).
    """
flat_size = reduce(lambda x, y: x * y, x_tensor.get_shape().as_list()[1:])
return tf.reshape(x_tensor, [-1, flat_size])
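# Note (illustrative): flat_size is the product of all non-batch dimensions,
# so e.g. a (batch, 8, 8, 128) conv output becomes a (batch, 8192) tensor;
# the -1 in tf.reshape keeps the batch dimension dynamic.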
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_flatten(flatten)
def fully_conn(x_tensor, num_outputs):
    """
    Apply a fully connected layer to x_tensor using weight and bias
    : x_tensor: A 2-D tensor where the first dimension is batch size.
    : num_outputs: The number of output that the new tensor should be.
    : return: A 2-D tensor where the second dimension is num_outputs.
    """
weights = tf.Variable(
tf.truncated_normal(
[x_tensor.get_shape().as_list()[-1], num_outputs],
stddev=0.1,
seed=1
)
)
biases = tf.Variable(
tf.truncated_normal([num_outputs], stddev=0.1, seed=1)
)
result = tf.matmul(x_tensor, weights)
result = tf.add(result, biases)
result = tf.nn.relu(result)
return result
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_fully_conn(fully_conn)
def output(x_tensor, num_outputs):
    """
    Apply an output layer to x_tensor using weight and bias
    : x_tensor: A 2-D tensor where the first dimension is batch size.
    : num_outputs: The number of output that the new tensor should be.
    : return: A 2-D tensor where the second dimension is num_outputs.
    """
# TODO: Implement Function
weights = tf.Variable(
tf.truncated_normal(
[x_tensor.get_shape().as_list()[-1], num_outputs],
stddev=0.1,
seed=1
)
)
biases = tf.Variable(
tf.truncated_normal([num_outputs], stddev=0.1, seed=1)
)
result = tf.matmul(x_tensor, weights)
result = tf.add(result, biases)
return result
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_output(output)
def conv_net(x, keep_prob):
    """
    Create a convolutional neural network model
    : x: Placeholder tensor that holds image data.
    : keep_prob: Placeholder tensor that holds dropout keep probability.
    : return: Tensor that represents logits
    """
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
# conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
    """
    Apply convolution then max pooling to x_tensor
    :param x_tensor: TensorFlow Tensor
    :param conv_num_outputs: Number of outputs for the convolutional layer
    :param conv_ksize: kernel size 2-D Tuple for the convolutional layer
    :param conv_strides: Stride 2-D Tuple for convolution
    :param pool_ksize: kernel size 2-D Tuple for pool
    :param pool_strides: Stride 2-D Tuple for pool
    : return: A tensor that represents convolution and max pooling of x_tensor
    """
conv_layer_1 = conv2d_maxpool(
x, 128, (2, 2), (2, 2), (2, 2), (2, 2)
)
conv_layer_2 = conv2d_maxpool(
conv_layer_1, 1024, (2, 2), (2, 2), (2, 2), (2, 2)
)
# TODO: Apply a Flatten Layer
# Function Definition from Above:
# flatten(x_tensor)
flat = flatten(conv_layer_2)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
# fully_conn(x_tensor, num_outputs)
conn_layer_1 = fully_conn(flat, 512)
conn_layer_1 = tf.nn.dropout(conn_layer_1, keep_prob)
conn_layer_2 = fully_conn(conn_layer_1, 32)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
# output(x_tensor, num_outputs)
result = output(conn_layer_2, 10)
# TODO: return output
return result
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that is can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
    """
    Optimize the session on a batch of images and labels
    : session: Current TensorFlow session
    : optimizer: TensorFlow optimizer function
    : keep_probability: keep probability
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    """
# TODO: Implement Function
session.run(
optimizer,
feed_dict={x: feature_batch, y: label_batch, keep_prob: keep_probability}
)
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_train_nn(train_neural_network)
def print_stats(session, feature_batch, label_batch, cost, accuracy):
    """
    Print information about loss and validation accuracy
    : session: Current TensorFlow session
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    : cost: TensorFlow cost function
    : accuracy: TensorFlow accuracy function
    """
loss = session.run(
cost, feed_dict={x: feature_batch, y: label_batch, keep_prob: 1.0}
)
valid_acc = session.run(
accuracy, feed_dict={x: valid_features, y: valid_labels,keep_prob: 1.0}
)
print("Loss: {:3.5f}, Validation Accuracy: {:0.5f}".format(loss, valid_acc))
# TODO: Tune Parameters
epochs = 25
batch_size = 512
keep_probability = 0.75
"""DON'T MODIFY ANYTHING IN THIS CELL"""
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
    """Test the saved model against the test dataset"""
test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The dataset is perfectly balanced across the two categories POSITIVE and NEGATIVE.
Step2: Well, at first glance, that seems disappointing. As expected, the most common words are some linking words like "the", "of", "for", "at", etc. Counting the words for POSITIVE and NEGATIVE reviews separately might appear pointless at first, as the same linking words are found among the most common for both the POSITIVE and NEGATIVE reviews.
Step3: Well, that looks like a normal distribution with a considerable amount of words that were used only in POSITIVE and only in NEGATIVE reviews. Could it be that those are words that occur only once or twice in the review corpus? They are not necessarily useful when identifying the sentiment, as they occur in only one or a few reviews. If that is the case, it would be better to exclude these words. We want our models to generalize well instead of overfitting on some very rare words. Let's exclude all words that occur fewer than 'min_occurance' times in the whole review corpus.
Step4: And that is the beautiful normal distribution that I was expecting. The total word count shrank from 74074 to 4276. Hence, there are many words that have been used only a few times. Looking at the figure, there are a lot of neutral words in our new sentiment selection, but there are also some words that are used almost exclusively in POSITIVE or NEGATIVE reviews. You can try different values for 'min_occurance' and observe how the amount of total words and the plot change. Let's check out the words for min_occurance = 100.
Step5: There are a lot of names among the words with positive sentiment. For example, edie (probably from edie falco, who won 2 Golden Globes and another 21 wins & 70 nominations), polanski (probably from roman polanski, who won 1 oscar and another 83 wins & 75 nominations). But there are also words like "superbly", "breathtaking", "refreshing", etc. Those are exactly the positive sentiment loaded words I was looking for. Similarly, there are words like "insult", "uninspired", "lame", "sucks", "miserably", "boredom" that no director would be happy to read in the reviews regarding his movie. One name catches the eye - that is "seagal", (probably from Steven Seagal). Well, I won't comment on that.
Step6: The classifier has only two parameters - 'min_word_count' and 'sentiment_threshold'. A min_word_count of 20 means the classifier will only consider words that occur at least 20 times in the review corpus. The 'sentiment_threshold' allows you to ignore words with a rather neutral sentiment. A 'sentiment_threshold' of 0.3 means that only words with a sentiment ratio of more than 0.3 or less than -0.3 will be considered in the prediction process. What the classifier does is create the sentiment ratio as previously shown. When predicting the sentiment, the classifier uses the sentiment ratio dict to sum up the sentiment ratios of all the words used in the review. If the overall sum is positive, the sentiment is also positive. If the overall sum is negative, the sentiment is also negative. It is pretty simple, isn't it? Let's measure the performance in a 5-fold cross-validation setting
Step7: A cross-validation accuracy of 85.7% is not bad for this naive approach and a classifier that trains in only a few seconds. At this point you may be asking yourself whether this score can easily be beaten with a neural network. Let's see.
Step8: Same as before, create_word2index has the two parameters 'min_occurance' and 'sentiment_threshold'. Check the explanation of those two in the previous section. Anyway, once you have the word2index dict, you can encode the reviews with the function below
Step9: Labels are easily one-hot encoded. Check out this explanation on why one-hot encoding is needed
Step10: At this point, you can transform both the reviews and the labels into data that the neural network can understand. Let's do that
Step11: You are good to go and train the neural network. In the example below, I'm using a simple neural network consisting of two fully connected layers. Trying different things out, I found that Dropout before the first layer can reduce overfitting. Dropout between the first and the second layer, however, made the performance worse. Increasing the number of hidden units in the two layers didn't lead to better performance, but to more overfitting. Increasing the number of layers made no difference.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
with open('data/reviews.txt','r') as file_handler:
reviews = np.array(list(map(lambda x:x[:-1], file_handler.readlines())))
with open('data/labels.txt','r') as file_handler:
labels = np.array(list(map(lambda x:x[:-1].upper(), file_handler.readlines())))
unique, counts = np.unique(labels, return_counts=True)
print('Reviews', len(reviews), 'Labels', len(labels), dict(zip(unique, counts)))
for i in range(10):
print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
from collections import Counter
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
for i in range(len(reviews)):
if(labels[i] == 'POSITIVE'):
for word in reviews[i].split(" "):
positive_counts[word] += 1
total_counts[word] += 1
else:
for word in reviews[i].split(" "):
negative_counts[word] += 1
total_counts[word] += 1
# Examine the counts of the most common words in positive reviews
print('Most common words:', total_counts.most_common()[0:30])
print('\nMost common words in NEGATIVE reviews:', negative_counts.most_common()[0:30])
print('\nMost common words in POSITIVE reviews:', positive_counts.most_common()[0:30])
import seaborn as sns
sentiment_ratio = Counter()
for word, count in list(total_counts.most_common()):
sentiment_ratio[word] = ((positive_counts[word] / total_counts[word]) - 0.5) / 0.5
print('Total words in sentiment ratio', len(sentiment_ratio))
sns.distplot(list(sentiment_ratio.values()));
min_occurance = 100
sentiment_ratio = Counter()
for word, count in list(total_counts.most_common()):
if total_counts[word] >= min_occurance: # only consider words
sentiment_ratio[word] = ((positive_counts[word] / total_counts[word]) - 0.5) / 0.5
print('Total words in sentiment ratio', len(sentiment_ratio))
sns.distplot(list(sentiment_ratio.values()));
print('Words with the most POSITIVE sentiment' ,sentiment_ratio.most_common()[:30])
print('\nWords with the most NEGATIVE sentiment' ,sentiment_ratio.most_common()[-30:])
class NaiveSentimentClassifier:
def __init__(self, min_word_count, sentiment_threshold):
self.min_word_count = min_word_count
self.sentiment_threshold = sentiment_threshold
def fit(self, reviews, labels):
positive_counts = Counter()
total_counts = Counter()
for i in range(len(reviews)):
if(labels[i] == 'POSITIVE'):
for word in reviews[i].split(" "):
positive_counts[word] += 1
total_counts[word] += 1
else:
for word in reviews[i].split(" "):
total_counts[word] += 1
self.sentiment_ratios = Counter()
for word, count in total_counts.items():
if(count > self.min_word_count):
self.sentiment_ratios[word] = \
((positive_counts[word] / count) - 0.5) / 0.5
def predict(self, reviews):
predictions = []
for review in reviews:
sum_review_sentiment = 0
for word in review.split(" "):
if abs(self.sentiment_ratios[word]) >= self.sentiment_threshold:
sum_review_sentiment += self.sentiment_ratios[word]
if sum_review_sentiment >= 0:
predictions.append('POSITIVE')
else:
predictions.append('NEGATIVE')
return predictions
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
all_predictions = []
all_true_labels = []
for train_index, validation_index in KFold(n_splits=5, random_state=42, shuffle=True).split(labels):
trainX, trainY = reviews[train_index], labels[train_index]
validationX, validationY = reviews[validation_index], labels[validation_index]
classifier = NaiveSentimentClassifier(20, 0.3)
classifier.fit(trainX, trainY)
predictions = classifier.predict(validationX)
print('Fold accuracy', accuracy_score(validationY, predictions))
all_predictions += predictions
all_true_labels += list(validationY)
print('CV accuracy', accuracy_score(all_true_labels, all_predictions))
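# Illustrative extra diagnostic (not in the original analysis): a confusion
# matrix over the pooled cross-validation predictions, to see which class is
# misclassified more often.
from sklearn.metrics import confusion_matrix
print(confusion_matrix(all_true_labels, all_predictions, labels=['NEGATIVE', 'POSITIVE']))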
def create_word2index(min_occurance, sentiment_threshold):
word2index = {}
index = 0
sentiment_ratio = Counter()
for word, count in list(total_counts.most_common()):
sentiment_ratio[word] = ((positive_counts[word] / total_counts[word]) - 0.5) / 0.5
is_word_eligable = lambda word: word not in word2index and \
total_counts[word] >= min_occurance and \
abs(sentiment_ratio[word]) >= sentiment_threshold
for i in range(len(reviews)):
for word in reviews[i].split(" "):
if is_word_eligable(word):
word2index[word] = index
index += 1
print("Word2index contains", len(word2index), 'words.')
return word2index
def encode_reviews_by_word_count(word2index):
encoded_reviews = []
for i in range(len(reviews)):
review_array = np.zeros(len(word2index))
for word in reviews[i].split(" "):
if word in word2index:
review_array[word2index[word]] += 1
encoded_reviews.append(review_array)
encoded_reviews = np.array(encoded_reviews)
print('Encoded reviews matrix shape', encoded_reviews.shape)
return encoded_reviews
def encode_labels():
encoded_labels = []
for label in labels:
if label == 'POSITIVE':
encoded_labels.append([0, 1])
else:
encoded_labels.append([1, 0])
return np.array(encoded_labels)
word2index = create_word2index(min_occurance=10, sentiment_threshold=0.2)
encoded_reviews = encode_reviews_by_word_count(word2index)
encoded_labels = encode_labels()
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers import Dense, Dropout, Input
from keras import metrics
all_predictions = []
all_true_labels = []
model_index = 0
for train_index, validation_index in \
KFold(n_splits=5, random_state=42, shuffle=True).split(encoded_labels):
model_index +=1
model_path= 'models/model_' + str(model_index)
print('Training model: ', model_path)
train_X, train_Y = encoded_reviews[train_index], encoded_labels[train_index]
validation_X, validation_Y = encoded_reviews[validation_index], encoded_labels[validation_index]
save_best_model = ModelCheckpoint(
model_path,
monitor='val_loss',
save_best_only=True,
save_weights_only=True)
model = Sequential()
model.add(Dropout(0.3, input_shape=(len(word2index),)))
model.add(Dense(10, activation="relu"))
model.add(Dense(10, activation="relu"))
model.add(Dense(2, activation="softmax"))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=[metrics.categorical_accuracy])
model.fit(train_X, train_Y,
validation_data=(validation_X, validation_Y),
callbacks = [save_best_model],
epochs=20, batch_size=32, verbose=0)
model.load_weights(model_path)
all_true_labels += list(validation_Y[:, 0])
all_predictions += list(model.predict(validation_X)[:, 0] > 0.5)
print('CV accuracy', accuracy_score(all_true_labels, all_predictions))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Layout of the corrugated structure insertion. Create Ocelot lattice <img src="4_layout.png" />
Step2: Load beam file
Step3: Initialization of the wakes and the places of their applying
Step4: Add the wakes in the lattice
Step5: Longitudinal beam distribution
Step6: Beam distribution
Step7: Wakefields of a Beam near a Single Plate in a Flat Dechirper
Step8: Describe corrugated structure and add wake to the lattice
Step9: Track the beam through the lattice
Step10: Crosscheck with analytics
Step11: Dipole wake
Step13: Convolution of the wake with the beam current
|
<ASSISTANT_TASK:>
Python Code:
# the output of plotting commands is displayed inline within frontends,
# directly below the code cell that produced it
%matplotlib inline
# this python library provides generic shallow (copy) and deep copy (deepcopy) operations
from copy import deepcopy
import time
# import from Ocelot main modules and functions
from ocelot import *
# import from Ocelot graphical modules
from ocelot.gui.accelerator import *
# load beam distribution
# this function convert Astra beam distribution to Ocelot format
# - ParticleArray. ParticleArray is designed for tracking.
# in order to work with converters we have to import
# specific module from ocelot.adaptors
from ocelot.adaptors.astra2ocelot import *
D00m25 = Drift(l = 0.25)
D01m = Drift(l = 1)
D02m = Drift(l = 2)
# Create markers for defining places of the wakes applying
w1_start = Marker()
w1_stop = Marker()
w2_start = Marker()
w2_stop = Marker()
w3_start = Marker()
w3_stop = Marker()
w4_start = Marker()
w4_stop = Marker()
w5_start = Marker()
w5_stop = Marker()
w6_start = Marker()
w6_stop = Marker()
# quadrupoles
Q1 = Quadrupole(l = 0.5, k1 = 0.215)
# lattice
lattice = (D01m, w1_start, D02m, w1_stop, w2_start, D02m, w2_stop,
w3_start, D02m, w3_stop, D00m25, Q1, D00m25,
w4_start, D02m, w4_stop, w5_start, D02m, w5_stop,
w6_start, D02m, w6_stop, D01m)
# creation MagneticLattice
method = MethodTM()
method.global_method = SecondTM
lat = MagneticLattice(lattice, method=method)
# calculate twiss functions with initial twiss parameters
tws0 = Twiss()
tws0.E = 14 # in GeV
tws0.beta_x = 22.5995
tws0.beta_y = 22.5995
tws0.alpha_x = -1.4285
tws0.alpha_y = 1.4285
tws = twiss(lat, tws0, nPoints=None)
# ploting twiss paramentrs.
plot_opt_func(lat, tws, top_plot=["Dx"], fig_name="i1", legend=False)
plt.show()
# load and convert ASTRA file to OCELOT beam distribution
# p_array_init = astraBeam2particleArray(filename='beam_chirper.ast')
# save ParticleArray to compresssed numpy array
# save_particle_array("chirper_beam.npz", p_array_init)
p_array_init = load_particle_array("chirper_beam.npz")
plt.plot(-p_array_init.tau()*1000, p_array_init.p(), "r.")
plt.grid(True)
plt.xlabel(r"$\tau$, mm")
plt.ylabel(r"$\frac{\Delta E}{E}$")
plt.show()
from ocelot.cpbd.wake3D import *
# load wake tables of corrugated structures
wk_vert = WakeTable('wake_vert_1m.txt')
wk_hor = WakeTable('wake_hor_1m.txt')
# creation of wake object with parameters
wake_v1 = Wake()
# w_sampling - defines the number of the equidistant sampling points for the one-dimensional
# wake coefficients in the Taylor expansion of the 3D wake function.
wake_v1.w_sampling = 500
wake_v1.wake_table = wk_vert
wake_v1.step = 1 # step in Navigator.unit_step, dz = Navigator.unit_step * wake.step [m]
wake_h1 = Wake()
wake_h1.w_sampling = 500
wake_h1.wake_table = wk_hor
wake_h1.step = 1
wake_v2 = deepcopy(wake_v1)
wake_h2 = deepcopy(wake_h1)
wake_v3 = deepcopy(wake_v1)
wake_h3 = deepcopy(wake_h1)
navi = Navigator(lat)
# add physics proccesses
navi.add_physics_proc(wake_v1, w1_start, w1_stop)
navi.add_physics_proc(wake_h1, w2_start, w2_stop)
navi.add_physics_proc(wake_v2, w3_start, w3_stop)
navi.add_physics_proc(wake_h2, w4_start, w4_stop)
navi.add_physics_proc(wake_v3, w5_start, w5_stop)
navi.add_physics_proc(wake_h3, w6_start, w6_stop)
# defining unit step in [m]
navi.unit_step = 0.2
# deep copy of the initial beam distribution
p_array = deepcopy(p_array_init)
print("tracking with Wakes .... ")
start = time.time()
tws_track, p_array = track(lat, p_array, navi)
print("\n time exec:", time.time() - start, "sec")
tau0 = p_array_init.tau()
p0 = p_array_init.p()
tau1 = p_array.tau()
p1 = p_array.p()
print(len(p1))
plt.figure(1)
plt.plot(-tau0*1000, p0, "r.", -tau1*1000, p1, "b.")
plt.legend(["before", "after"], loc=4)
plt.grid(True)
plt.xlabel(r"$\tau$, mm")
plt.ylabel(r"$\frac{\Delta E}{E}$")
plt.show()
# by default the beam head is on the left side
show_e_beam(p_array, figsize=(8,6))
plt.show()
# plotting twiss parameters.
plot_opt_func(lat, tws_track, top_plot=["Dx"], fig_name="i1", legend=False)
plt.show()
# create a simple lattice MagneticLattice
m1 = Marker()
m2 = Marker()
# quadrupoles
Q1 = Quadrupole(l = 0.5, k1 = 0.215)
lattice = (Drift(l=1), m1, Drift(l=1), m2, Drift(l=2), Q1, Drift(l=2))
method = MethodTM()
method.global_method = SecondTM
lat = MagneticLattice(lattice, method=method)
# description of args can be also be shown with Shift+Tab
sigma = np.std(p_array.tau())
print("RMS long beam size: ", sigma * 1e6, " um")
wk_tv_kick = WakeTableDechirperOffAxis(b=500*1e-6, # distance from the plate in [m]
a=0.01, # half gap between plates in [m]
width=0.02, # width of the corrugated structure in [m]
t=0.25*1e-3, # longitudinal gap in [m]
p=0.5*1e-3, # period of corrugation in [m]
length=1, # length of the corrugated structure in [m]
sigma=12e-6, # characteristic (rms) longitudinal beam size in [m]
orient="horz") # "horz" or "vert" plate orientation
# creation of wake object with parameters
wake = Wake()
# w_sampling - defines the number of the equidistant sampling points for the one-dimensional
# wake coefficients in the Taylor expansion of the 3D wake function.
wake.w_sampling = 500
wake.wake_table = wk_tv_kick
wake.step = 1 # step in Navigator.unit_step, dz = Navigator.unit_step * wake.step [m]
navi = Navigator(lat)
# add physics proccesses
navi.add_physics_proc(wake, m1, m2)
# deep copy of the initial beam distribution
p_array = deepcopy(p_array_init)
print("tracking with Wakes .... ")
start = time.time()
tws_track, p_array = track(lat, p_array, navi)
print("\n time exec:", time.time() - start, "sec")
# by default the beam head is on the left side
show_e_beam(p_array, figsize=(8,6))
plt.show()
p = 0.5e-3 # period
t = 0.25e-3 # Longitudinal gap
b = 500e-6 # Distance from the plate
alpha = 1 - 0.465 * np.sqrt(t/p) - 0.07 * t/p
print("alpha = ", alpha)
s0 = 8*b**2 * t / (9 * np.pi * alpha**2 * p**2)
print("s0 = ", s0*1e6, " [um]")
s = np.linspace(0, 100, num=100) *1e-6
w = lambda s: 2./b**3 * s0 * (1 - (1 + np.sqrt(s/s0)) * np.exp(- np.sqrt(s/s0))) * Z0 * speed_of_light / (4 * np.pi)
MV = 1e6
nC = 1e-9
plt.plot(s*1e6, np.array([w(si) for si in s])*nC/MV )
plt.xlabel("s [um]")
plt.ylabel("Wd [MV/(nC m)]")
plt.show()
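# Illustrative check (not in the original notebook): for s >> s0 the bracket
# in w(s) tends to 1, so the dipole wake saturates at
# w_d(inf) = 2*s0/b**3 * Z0*c/(4*pi).
print("w_d saturation:", 2 * s0 / b**3 * Z0 * speed_of_light / (4 * np.pi) * nC / MV, "MV/(nC m)")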
def convolve_beam(current, wake):
    """
    convolve wake with beam current
    :param current: current[:, 0] - s in [m], current[:, 1] - current in [A]. The beam head is on the left
    :param wake: wake function in form: wake(s)
    :return: wake_kick[:, 0] - s in [m], wake_kick[:, 1] - V
    """
s_shift = current[0, 0]
current[:, 0] -= s_shift
s = current[:, 0]
step = (s[-1] - s[0]) / (len(s) - 1)
q = current[:, 1] / speed_of_light
w = np.array([wake(si) for si in s])
wake = np.convolve(q, w) * step
s_new = np.cumsum(np.ones(len(wake))) * step
wake_kick = np.vstack((s_new, wake))
return wake_kick.T
I = s_to_cur(p_array.tau(), 0.01 * np.std(p_array.tau()), np.sum(p_array.q_array), speed_of_light)
dipole_kick = convolve_beam(I, w)
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.set_xlabel('s [mm]')
ax1.set_ylabel('Wake [MV]', color=color)
ax1.plot(dipole_kick[:, 0] * 1e3, dipole_kick[:, 1] * 1e-6, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('I [kA]', color=color)
ax2.plot(I[:, 0] * 1e3, I[:, 1] * 1e-3, color=color)
ax2.tick_params(axis='y', labelcolor=color)
plt.show()
length = 1 # corrugated structure in [m]
p_array = deepcopy(p_array_init)
z = p_array.tau()
ind_z_sort = np.argsort(z)
z_sort = z[ind_z_sort]
wd = np.interp(z_sort - z_sort[0], dipole_kick[:, 0], dipole_kick[:, 1])
delta_E_y = wd * 1e-9 * length
pc_ref = np.sqrt(p_array.E ** 2 / m_e_GeV ** 2 - 1) * m_e_GeV
delta_py = delta_E_y / pc_ref
p_array.rparticles[3][ind_z_sort] += delta_py
show_density(p_array.tau() * 1e3, p_array.py() * 1e3, ax=None, nbins_x=250, nbins_y=250,
interpolation="bilinear", xlabel="s [mm]", ylabel='py [mrad]', nfig=50,
title="Side view", figsize=None, grid=False)
plt.show()
p_array = deepcopy(p_array_init)
wk_tv_kick = WakeTableDechirperOffAxis(b=b, # distance from the plate in [m]
a=0.01, # half gap between plates in [m]
width=0.02, # width of the corrugated structure in [m]
t=t, # longitudinal gap in [m]
p=p, # period of corrugation in [m]
length=1, # length of the corrugated structure in [m]
sigma=12e-6, # characteristic (rms) longitudinal beam size in [m]
orient="horz") # "horz" or "vert" plate orientation
# creation of wake object with parameters
wake = Wake()
# w_sampling - defines the number of the equidistant sampling points for the one-dimensional
# wake coefficients in the Taylor expansion of the 3D wake function.
wake.w_sampling = 500
wake.wake_table = wk_tv_kick
wake.step = 1 # step in Navigator.unit_step, dz = Navigator.unit_step * wake.step [m]
wake.prepare(None)
wake.s_start = 0
wake.s_stop = 1
wake.apply(p_array, dz=1)
show_density(p_array.tau() * 1e6, p_array.py() * 1e3, ax=None, nbins_x=250, nbins_y=250,
interpolation="bilinear", xlabel="s [um]", ylabel='py [mrad]', nfig=60,
title="Side view", figsize=None, grid=False)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Examples of broken lines
Step2: The command print inserts spaces between expressions that are received as a parameter, and a newline character at the end, unless it receives a comma at the end of the parameter list.
Step3: The operator % computes the modulus (remainder of division).
|
<ASSISTANT_TASK:>
Python Code:
#!/usr/bin/env python
# A code line that shows the result of 7 times 3
print 7 * 3
# A line broken by backslash
a = 7 * 3 + \
5 / 2
# A list (broken by comma)
b = ['a', 'b', 'c',
'd', 'e']
# A function call (broken by comma)
c = range(1,
11)
# Prints everything
print a, b, c
# For i on the list 234, 654, 378, 798:
for i in [234, 654, 378, 798]:
# If the remainder dividing by 3 is equal to zero:
if i % 3 == 0:
# Prints...
print i, '/ 3 =', i / 3
for i in [256, 768, 32, 1894]:
if i % 3 == 0:
print(i, "/ 3 =", i/3)
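# A couple more modulus examples (illustrative addition):
print(17 % 5) # remainder of 17 divided by 5 is 2
print(18 % 3) # 18 is divisible by 3, so the remainder is 0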
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model trained on a 0.1 split of the data
Step2: Plot train and valid set NLL
Step3: Strangely, though, overfitting to the training set does not seem to be increasing the validation set NLL?
Step4: Plot ratio of update norms to parameter norms across epochs for different layers
|
<ASSISTANT_TASK:>
Python Code:
import pylearn2.utils
import pylearn2.config
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import os.path
model = pylearn2.utils.serial.load(os.path.expandvars('${DATA_DIR}/plankton/models/learning_rate_experiment/ilr_5e-2_lin_decay_adj_on_recent.pkl'))
print(model)
plt.plot(model.monitor.channels['valid_y_y_1_nll'].val_record)
plt.plot(model.monitor.channels['train_y_y_1_nll'].val_record)
plt.legend(['Valid', 'Train'])
plt.ylabel('NLL')
plt.xlabel('Epochs')
mean_update_channels = [c for c in model.monitor.channels if 'mean_update' in c]
print('\n'.join(mean_update_channels))
param_norm_channels = [c for c in model.monitor.channels if 'norms_mean' in c]
print('\n'.join(param_norm_channels))
h1_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h1_W_kernel_norm_mean'].val_record])
h1_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h1_kernel_norms_mean'].val_record])
plt.plot(h1_W_norms / h1_W_up_norms)
h2_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h2_W_kernel_norm_mean'].val_record])
h2_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h2_kernel_norms_mean'].val_record])
plt.plot(h2_W_norms / h2_W_up_norms)
h3_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h3_W_kernel_norm_mean'].val_record])
h3_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h3_kernel_norms_mean'].val_record])
plt.plot(h3_W_norms / h3_W_up_norms)
h4_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h4_W_col_norm_mean'].val_record])
h4_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h4_col_norms_mean'].val_record])
plt.plot(h4_W_norms / h4_W_up_norms)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading data
Step2: Training variables
Step3: Folding strategy - stacking algorithm
Step4: Define folding model
Step5: Default prediction (predict i_th_ fold by i_th_ classifier)
Step6: Voting prediction (predict i-fold by all classifiers and take value, which is calculated by vote_function)
Step7: Comparison of folds
Step8: Signal distribution for each fold
Step9: Background distribution for each fold
Step10: ROCs (each fold used as test dataset)
Step11: Report for test dataset
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
!cd toy_datasets; wget -O MiniBooNE_PID.txt -nc MiniBooNE_PID.txt https://archive.ics.uci.edu/ml/machine-learning-databases/00199/MiniBooNE_PID.txt
import numpy, pandas
from rep.utils import train_test_split
from sklearn.metrics import roc_auc_score
data = pandas.read_csv('toy_datasets/MiniBooNE_PID.txt', sep='\s*', skiprows=[0], header=None, engine='python')
labels = pandas.read_csv('toy_datasets/MiniBooNE_PID.txt', sep=' ', nrows=1, header=None)
labels = [1] * labels[1].values[0] + [0] * labels[2].values[0]
data.columns = ['feature_{}'.format(key) for key in data.columns]
train_data, test_data, train_labels, test_labels = train_test_split(data, labels, train_size=0.5)
variables = list(data.columns)
from rep.estimators import SklearnClassifier
from sklearn.ensemble import GradientBoostingClassifier
from rep.metaml import FoldingClassifier
n_folds = 4
folder = FoldingClassifier(GradientBoostingClassifier(), n_folds=n_folds, features=variables)
folder.fit(train_data, train_labels)
folder.predict_proba(train_data)
# definition of mean function, which combines all predictions
def mean_vote(x):
return numpy.mean(x, axis=0)
folder.predict_proba(test_data, vote_function=mean_vote)
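# Illustrative check (not in the original notebook): score the mean-vote
# predictions on the held-out test set with ROC AUC.
print('Test ROC AUC (mean vote):',
      roc_auc_score(test_labels, folder.predict_proba(test_data, vote_function=mean_vote)[:, 1]))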
from rep.data.storage import LabeledDataStorage
from rep.report import ClassificationReport
# add folds_column to dataset to use mask
train_data["FOLDS"] = folder._get_folds_column(len(train_data))
lds = LabeledDataStorage(train_data, train_labels)
report = ClassificationReport({'folding': folder}, lds)
for fold_num in range(n_folds):
report.prediction_pdf(mask="FOLDS == %d" % fold_num, labels_dict={1: 'sig fold %d' % fold_num}).plot()
for fold_num in range(n_folds):
report.prediction_pdf(mask="FOLDS == %d" % fold_num, labels_dict={0: 'bck fold %d' % fold_num}).plot()
for fold_num in range(n_folds):
report.roc(mask="FOLDS == %d" % fold_num).plot()
lds = LabeledDataStorage(test_data, test_labels)
report = ClassificationReport({'folding': folder}, lds)
report.prediction_pdf().plot(new_plot=True, figsize = (9, 4))
report.roc().plot(xlim=(0.5, 1))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Looking at the above plot, it is clear that when the sample size is small, there is greater variation in the average (compare how jagged and jumpy the average is initially, then smooths out). All three paths approach the value 4.5, but just flirt with it as $N$ gets large. Mathematicians and statisticians have another name for flirting: convergence.
Step3: As expected, the expected distance between our sample average and the actual expected value shrinks as $N$ grows large. But also notice that the rate of convergence decreases, that is, we need only 10 000 additional samples to move from 0.020 to 0.015, a difference of 0.005, but 20 000 more samples to again decrease from 0.015 to 0.010, again only a 0.005 decrease.
Step4: What does this all have to do with Bayesian statistics?
Step5: What do we observe? Without accounting for population sizes we run the risk of making an enormous inference error
Step6: Not at all uniform over 100 to 1500. This is an absolute failure of the Law of Large Numbers.
Step8: The above is a classic phenomenon in statistics. I say classic referring to the "shape" of the scatter plot above. It follows a classic triangular form, that tightens as we increase the sample size (as the Law of Large Numbers becomes more exact).
Step10: For a given true upvote ratio $p$ and $N$ votes, the number of upvotes will look like a Binomial random variable with parameters $p$ and $N$. (This is because of the equivalence between upvote ratio and probability of upvoting versus downvoting, out of $N$ possible votes/trials). We create a function that performs Bayesian inference on $p$, for a particular submission's upvote/downvote pair.
Step11: Below are the resulting posterior distributions.
Step12: Some distributions are very tight, others have very long tails (relatively speaking), expressing our uncertainty with what the true upvote ratio might be.
Step13: The best submissions, according to our procedure, are the submissions that are most-likely to score a high percentage of upvotes. Visually those are the submissions with the 95% least plausible value close to 1.
Step14: We can view the ordering visually by plotting the posterior mean and bounds, and sorting by the lower bound. In the plot below, notice that the left error-bar is sorted (as we suggested this is the best way to determine an ordering), so the means, indicated by dots, do not follow any strong pattern.
Step15: In the graphic above, you can see why sorting by mean would be sub-optimal.
Step16: 2. The following table was located in the paper "Going for Three
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
from IPython.core.pylabtools import figsize
import matplotlib.pyplot as plt
figsize( 12.5, 5 )
sample_size = 100000
expected_value = lambda_ = 4.5
poi = np.random.poisson
N_samples = range(1,sample_size,100)
for k in range(3):
samples = poi( lambda_, sample_size )
partial_average = [ samples[:i].mean() for i in N_samples ]
plt.plot( N_samples, partial_average, lw=1.5,label="average \
of $n$ samples; seq. %d"%k)
plt.plot( N_samples, expected_value*np.ones_like( partial_average),
ls = "--", label = "true expected value", c = "k" )
plt.ylim( 4.35, 4.65)
plt.title( "Convergence of the average of \n random variables to its \
expected value" )
plt.ylabel( "average of $n$ samples" )
plt.xlabel( "# of samples, $n$")
plt.legend();
figsize( 12.5, 4)
N_Y = 250 #use this many to approximate D(N)
N_array = np.arange( 1000, 50000, 2500 ) #use this many samples in the approx. to the variance.
D_N_results = np.zeros( len( N_array ) )
lambda_ = 4.5
expected_value = lambda_ #for X ~ Poi(lambda) , E[ X ] = lambda
def D_N( n ):
This function approx. D_n, the average variance of using n samples.
Z = poi( lambda_, (n, N_Y) )
average_Z = Z.mean(axis=0)
return np.sqrt( ( (average_Z - expected_value)**2 ).mean() )
for i,n in enumerate(N_array):
D_N_results[i] = D_N(n)
plt.xlabel( "$N$" )
plt.ylabel( "expected squared-distance from true value" )
plt.plot(N_array, D_N_results, lw = 3,
label="expected distance between\n\
expected value and \naverage of $N$ random variables.")
plt.plot( N_array, np.sqrt(expected_value)/np.sqrt(N_array), lw = 2, ls = "--",
label = r"$\frac{\sqrt{\lambda}}{\sqrt{N}}$" )
plt.legend()
plt.title( "How 'fast' is the sample average converging? " );
N = 10000
print( np.mean( [ np.random.exponential( 0.5 ) > 5 for i in range(N) ] ) )
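# Illustrative comparison (not in the original text): for an Exponential with
# scale 0.5 the exact probability is P(Z > 5) = exp(-5 / 0.5) ~ 4.5e-5, so the
# Monte Carlo estimate above is extremely noisy for such a rare event.
print(np.exp(-5 / 0.5))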
figsize( 12.5, 4)
std_height = 15
mean_height = 150
n_counties = 5000
pop_generator = np.random.randint
norm = np.random.normal
#generate some artificial population numbers
population = pop_generator(100, 1500, n_counties )
average_across_county = np.zeros( n_counties )
for i in range( n_counties ):
#generate some individuals and take the mean
average_across_county[i] = norm(mean_height, 1./std_height,
population[i] ).mean()
#located the counties with the apparently most extreme average heights.
i_min = np.argmin( average_across_county )
i_max = np.argmax( average_across_county )
#plot population size vs. recorded average
plt.scatter( population, average_across_county, alpha = 0.5, c="#7A68A6")
plt.scatter( [ population[i_min], population[i_max] ],
[average_across_county[i_min], average_across_county[i_max] ],
s = 60, marker = "o", facecolors = "none",
edgecolors = "#A60628", linewidths = 1.5,
label="extreme heights")
plt.xlim( 100, 1500 )
plt.title( "Average height vs. County Population")
plt.xlabel("County Population")
plt.ylabel("Average height in county")
plt.plot( [100, 1500], [150, 150], color = "k", label = "true expected \
height", ls="--" )
plt.legend(scatterpoints = 1);
print("Population sizes of 10 'shortest' counties: ")
print(population[ np.argsort( average_across_county )[:10] ], '\n')
print("Population sizes of 10 'tallest' counties: ")
print(population[ np.argsort( -average_across_county )[:10] ])
figsize( 12.5, 6.5 )
data = np.genfromtxt( "./data/census_data.csv", skip_header=1,
delimiter= ",")
plt.scatter( data[:,1], data[:,0], alpha = 0.5, c="#7A68A6")
plt.title("Census mail-back rate vs Population")
plt.ylabel("Mail-back rate")
plt.xlabel("population of block-group")
plt.xlim(-100, 15e3 )
plt.ylim( -5, 105)
i_min = np.argmin( data[:,0] )
i_max = np.argmax( data[:,0] )
plt.scatter( [ data[i_min,1], data[i_max, 1] ],
[ data[i_min,0], data[i_max,0] ],
s = 60, marker = "o", facecolors = "none",
edgecolors = "#A60628", linewidths = 1.5,
label="most extreme points")
plt.legend(scatterpoints = 1);
#adding a number to the end of the %run call with get the ith top post.
%run top_showerthoughts_submissions.py 2
print("Post contents: \n")
print(top_post)
"""
contents: an array of the text from the last 100 top submissions to a subreddit
votes: a 2d numpy array of upvotes, downvotes for each submission.
"""
n_submissions = len(votes)
submissions = np.random.randint( n_submissions, size=4)
print("Some Submissions (out of %d total) \n-----------"%n_submissions)
for i in submissions:
print('"' + contents[i] + '"')
print("upvotes/downvotes: ",votes[i,:], "\n")
import pymc3 as pm
def posterior_upvote_ratio( upvotes, downvotes, samples = 20000):
    """
    This function accepts the number of upvotes and downvotes a particular submission received,
    and the number of posterior samples to return to the user. Assumes a uniform prior.
    """
N = upvotes + downvotes
with pm.Model() as model:
upvote_ratio = pm.Uniform("upvote_ratio", 0, 1)
observations = pm.Binomial( "obs", N, upvote_ratio, observed=upvotes)
trace = pm.sample(samples, step=pm.Metropolis())
burned_trace = trace[int(samples/4):]
return burned_trace["upvote_ratio"]
figsize( 11., 8)
posteriors = []
colours = ["#348ABD", "#A60628", "#7A68A6", "#467821", "#CF4457"]
for i in range(len(submissions)):
j = submissions[i]
posteriors.append( posterior_upvote_ratio( votes[j, 0], votes[j,1] ) )
plt.hist( posteriors[i], bins = 10, normed = True, alpha = .9,
histtype="step",color = colours[i%5], lw = 3,
label = '(%d up:%d down)\n%s...'%(votes[j, 0], votes[j,1], contents[j][:50]) )
plt.hist( posteriors[i], bins = 10, normed = True, alpha = .2,
histtype="stepfilled",color = colours[i], lw = 3, )
plt.legend(loc="upper left")
plt.xlim( 0, 1)
plt.title("Posterior distributions of upvote ratios on different submissions");
N = posteriors[0].shape[0]
lower_limits = []
for i in range(len(submissions)):
j = submissions[i]
plt.hist( posteriors[i], bins = 20, normed = True, alpha = .9,
histtype="step",color = colours[i], lw = 3,
label = '(%d up:%d down)\n%s...'%(votes[j, 0], votes[j,1], contents[j][:50]) )
plt.hist( posteriors[i], bins = 20, normed = True, alpha = .2,
histtype="stepfilled",color = colours[i], lw = 3, )
v = np.sort( posteriors[i] )[ int(0.05*N) ]
#plt.vlines( v, 0, 15 , color = "k", alpha = 1, linewidths=3 )
plt.vlines( v, 0, 10 , color = colours[i], linestyles = "--", linewidths=3 )
lower_limits.append(v)
plt.legend(loc="upper left")
plt.legend(loc="upper left")
order = np.argsort( -np.array( lower_limits ) )
print(order, lower_limits)
def intervals(u,d):
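    # Explanatory comments added (not in the original code): with a
    # Uniform(0, 1) prior, the posterior of the upvote ratio given u upvotes
    # and d downvotes is Beta(1 + u, 1 + d). The lines below use its mean and
    # 1.65 standard deviations (a normal approximation) to form an
    # approximate 95% lower bound.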
a = 1. + u
b = 1. + d
mu = a/(a+b)
std_err = 1.65*np.sqrt( (a*b)/( (a+b)**2*(a+b+1.) ) )
return ( mu, std_err )
print("Approximate lower bounds:")
posterior_mean, std_err = intervals(votes[:,0],votes[:,1])
lb = posterior_mean - std_err
print(lb)
print("\n")
print("Top 40 Sorted according to approximate lower bounds:")
print("\n")
order = np.argsort( -lb )
ordered_contents = []
for i in order[:40]:
ordered_contents.append( contents[i] )
print(votes[i,0], votes[i,1], contents[i])
print("-------------")
r_order = order[::-1][-40:]
plt.errorbar( posterior_mean[r_order], np.arange( len(r_order) ),
xerr=std_err[r_order], capsize=0, fmt="o",
color = "#7A68A6")
plt.xlim( 0.3, 1)
plt.yticks( np.arange( len(r_order)-1,-1,-1 ), map( lambda x: x[:30].replace("\n",""), ordered_contents) );
## Enter code here
import scipy.stats as stats
exp = stats.expon( scale=4 )
N = 1e5
X = exp.rvs( int(N) )
## ...
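# One possible completion (the exercise is left open above; this sketch only
# compares the empirical mean and variance with the exact values 4 and 16
# for an Exponential with scale 4):
print(X.mean(), X.var())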
from IPython.core.display import HTML
def css_styling():
styles = open("../styles/custom.css", "r").read()
return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read in an Image
Step10: Ideas for Lane Detection Pipeline
Step11: Test Images
Step12: Build a Lane Finding Pipeline
Step13: Test on Videos
Step14: Let's try the one with the solid white lane on the right first ...
Step16: Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
Step18: Improve the draw_lines() function
Step20: Writeup and Submission
|
<ASSISTANT_TASK:>
Python Code:
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
import math
def grayscale(img):
    """
    Applies the Grayscale transform
    This will return an image with only one color channel
    but NOTE: to see the returned image as grayscale
    (assuming your grayscaled image is called 'gray')
    you should call plt.imshow(gray, cmap='gray')
    """
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
def canny(img, low_threshold, high_threshold):
    """Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    """
    Applies an image mask.
    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=10):
    # Original per-segment implementation, kept for reference:
    # for line in lines:
    #     for x1, y1, x2, y2 in line:
    #         cv2.line(img, (x1, y1), (x2, y2), color, thickness)
    """
    This function draws the lane lines on the road by applying the following method:
    1) Read the input lines data
    2) Discriminate the data points into Right and Left line data points
    3) Fit a straight line of the form y = mx + c
    4) Solve for x's given y's [x = (y - c)/m]
    The starting point of the lines is at the bottom of the screen, from the driver's point of view.
    """
imshape = img.shape
# Initializing coordinate list
x_right, y_right = [], []
x_left, y_left = [], []
# Initializing tunning parameters
Mid_X = int(imshape[1] * 0.5)
Mid_y = int(imshape[0] * 0.65)
bottom_y = imshape[0]
    # Discriminating data points between left and right
for line in lines:
for x1, y1, x2, y2 in line:
if x1 and x2 > Mid_X: # Check if the data is for the Right Line
x_right.extend((x1, x2))
y_right.extend((y1, y2))
else: # Assign data to the Left line
x_left.extend((x1, x2))
y_left.extend((y1, y2))
    y1R, y2R = bottom_y, Mid_y # Right line initial coordinates
    y1L, y2L = bottom_y, Mid_y # Left line initial coordinates
# Fitting the Right Line
if len(x_right) > 0: # Check for empty array
R_line_fit = np.polyfit(x_right, y_right, 1)
x1R = int(round((y1R - R_line_fit[1])/R_line_fit[0])) # Extrapolation line, Step 4
x2R = int(round((y2R - R_line_fit[1])/R_line_fit[0]))
cv2.line(img, (x1R, y1R), (x2R, y2R), color, thickness) # Drawing the Right Line
# Fitting the Left Line
if len(x_left) > 0: # Check for empty array
L_line_fit = np.polyfit(x_left, y_left, 1)
x1L = int(round((y1L - L_line_fit[1])/L_line_fit[0]))
x2L = int(round((y2L - L_line_fit[1])/L_line_fit[0]))
cv2.line(img, (x1L, y1L), (x2L, y2L), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """`img` should be the output of a Canny transform.
    Returns an image with hough lines drawn.
    """
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
    """`img` is the output of hough_lines(): a blank (all black) image
    with lines drawn on it.
    `initial_img` should be the image before any processing.
    The result image is computed as follows:
        initial_img * α + img * β + λ
    NOTE: initial_img and img must be the same shape!
    """
return cv2.addWeighted(initial_img, α, img, β, λ)
import os
os.listdir("test_images/")
def process_image(image):
    '''Applies the grayscale transform, Gaussian blur, Canny edge detection, a region-of-interest
    mask, the Hough transform, and a weighted overlay; returns an image with the lane lines drawn.'''
## generate great scale image
gray = grayscale(image)
#mpimg.imsave("test_images_output/" + "gray_"+ file, gray)
## apply Gaussian Blur
# Define a kernel size
kernel_size = 5
blurred = gaussian_blur(gray,kernel_size)
#mpimg.imsave("test_images_output/" + "blurred_"+ file, blurred)
# apply Canny Transformation
# Define thresholds
low_threshold = 50
high_threshold = 150
cannied = canny(blurred,low_threshold,high_threshold)
#mpimg.imsave("test_images_output/" + "canny_"+ file, cannied)
## get area of interest
# get vertices and apex
imshape = image.shape
    Num_rows = imshape[0]
    Num_columns = imshape[1]
    vertices = np.array([[(100, Num_rows),
                          (-30 + Num_columns*0.5, Num_rows*0.60),
                          (30 + Num_columns*0.5, Num_rows*0.6),
                          (Num_columns, Num_rows)]], dtype=np.int32)
roi = region_of_interest(cannied,vertices)
# apply Hough Transformation
# Define the Hough transform parameters
# Make a blank the same size as our image to draw on
rho = 2 # distance resolution in pixels of the Hough grid
    theta = np.pi/180 # angular resolution in radians of the Hough grid
threshold = 3 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 5 # minimum number of pixels making up a line
max_line_gap = 20 # maximum gap in pixels between connectable line segments
line_image = np.copy(gray)*0 # creating a blank to draw lines on
houghed = hough_lines(roi, rho, theta, threshold, min_line_length, max_line_gap)
# apply weighted_img
# Define the Weighted Image parameters
α,β,λ = 0.8,0.8,0
# Create a "color" binary image to combine with line image
color_edges = np.dstack((cannied, cannied, cannied))
weighted = weighted_img(houghed,image,α,β,λ)
#mpimg.imsave("test_images_output/" + "weighted_"+ file, weighted)
#return gray
#return blurred
#return cannied
#return houghed
return weighted
for file in os.listdir("test_images/"): #Process all images in the directory
image = mpimg.imread(os.path.join("test_images/",file))
processed_image = process_image(image)
#mpimg.imsave("test_images_output/" + "gray_"+ file, processed_image)
#mpimg.imsave("test_images_output/" + "blurred_"+ file, processed_image)
#mpimg.imsave("test_images_output/" + "canny_"+ file, processed_image)
#mpimg.imsave("test_images_output/" + "houghed_"+ file, processed_image)
mpimg.imsave("test_images_output/" + "draw-line_"+ file, processed_image)
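# A minimal alternative sketch, assuming one wants slope-based separation instead
# of the x-position split used in draw_lines above: split Hough segments into
# left/right lane candidates by slope sign and reject near-horizontal noise.
def split_by_slope(lines, min_abs_slope=0.3):
    left, right = [], []
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                continue  # skip vertical segments (undefined slope)
            slope = (y2 - y1) / float(x2 - x1)
            if abs(slope) < min_abs_slope:
                continue  # drop near-horizontal segments
            # image y grows downward, so the right lane line has positive slope
            (right if slope > 0 else left).append((x1, y1, x2, y2))
    return left, right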
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This method was perhaps first introduced in the article "Soft Similarity and Soft Cosine Measure: Similarity of Features in Vector Space Model" by Grigori Sidorov et al.
Step2: The first two sentences have very similar content, and as such the SCM between them should be high.
Step3: Next, we will build a dictionary and a TF-IDF model, and we will convert the sentences to the bag-of-words and TF-IDF representations.
Step4: Now, as mentioned earlier, we will be using some downloaded pre-trained word2vec embeddings.
Step5: So let's compute SCM using the inner_product method.
Step6: Let's try the same thing with two completely unrelated sentences.
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = mpimg.imread('scm-hello.png')
imgplot = plt.imshow(img)
plt.axis('off')
plt.show()
# Initialize logging.
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
sentence_obama = 'Obama speaks to the media in Illinois'
sentence_president = 'The president greets the press in Chicago'
sentence_orange = 'Oranges are my favorite fruit'
# Import and download stopwords from NLTK.
from nltk.corpus import stopwords
from nltk import download
download('stopwords') # Download stopwords list.
stop_words = stopwords.words('english')
def preprocess(sentence):
return [w for w in sentence.lower().split() if w not in stop_words]
sentence_obama = preprocess(sentence_obama)
sentence_president = preprocess(sentence_president)
sentence_orange = preprocess(sentence_orange)
from gensim.corpora import Dictionary
documents = [sentence_obama, sentence_president, sentence_orange]
dictionary = Dictionary(documents)
sentence_obama = dictionary.doc2bow(sentence_obama)
sentence_president = dictionary.doc2bow(sentence_president)
sentence_orange = dictionary.doc2bow(sentence_orange)
from gensim.models import TfidfModel
documents = [sentence_obama, sentence_president, sentence_orange]
tfidf = TfidfModel(documents)
sentence_obama = tfidf[sentence_obama]
sentence_president = tfidf[sentence_president]
sentence_orange = tfidf[sentence_orange]
import gensim.downloader as api
model = api.load('word2vec-google-news-300')
from gensim.similarities import SparseTermSimilarityMatrix, WordEmbeddingSimilarityIndex
termsim_index = WordEmbeddingSimilarityIndex(model)
termsim_matrix = SparseTermSimilarityMatrix(termsim_index, dictionary, tfidf)
similarity = termsim_matrix.inner_product(sentence_obama, sentence_president, normalized=(True, True))
print('similarity = %.4f' % similarity)
similarity = termsim_matrix.inner_product(sentence_obama, sentence_orange, normalized=(True, True))
print('similarity = %.4f' % similarity)
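# A small follow-up sketch: the same inner_product call can build a full pairwise
# soft-cosine matrix over the three TF-IDF sentence vectors defined above.
docs = [sentence_obama, sentence_president, sentence_orange]
for i in range(len(docs)):
    for j in range(len(docs)):
        pair_sim = termsim_matrix.inner_product(docs[i], docs[j], normalized=(True, True))
        print('doc%d vs doc%d: %.4f' % (i, j, pair_sim))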
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import data files and word dictionaries
Step2: Pick a subset of users that have more than 100 reviews, then run an iterative test on these users
Step3: Let's pick a specific user and start building out the recommendation on her data
Step4: Create a separate list for the review text and the review ratings, then aggregate them into a dataframe.
Step5: Connect to the mongodb and pull in the relevant restaurant reviews
Step10: Create a separate list for each review for the businesses that show up in the business_id list. Remove all reviews that relate to the current user.
Step11: Testing
Step12: Visualization ideas
|
<ASSISTANT_TASK:>
Python Code:
import json
import pandas as pd
import re
import string
from scipy import sparse
import numpy as np
from pymongo import MongoClient
from nltk.corpus import stopwords
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import log_loss
from sklearn.feature_extraction.text import TfidfVectorizer
import xgboost as xgb
from gensim import corpora, models, similarities, matutils
import tqdm
lh_neg = open('../input/negative-words.txt', 'r').read()
lh_neg = lh_neg.split('\n')
lh_pos = open('../input/positive-words.txt', 'r').read()
lh_pos = lh_pos.split('\n')
restaurant_reviews = json.load(open("../input/rest_review_dictionary.json"))
user_reviews_json = json.load(open("../input/user_review_dictionary.json"))
word_list = list(set(lh_pos + lh_neg))
bigusers = []
for i in tqdm.tqdm(range(0, len(user_reviews_json.keys()))):
if len(user_reviews_json[user_reviews_json.keys()[i]]) > 100:
bigusers.append(user_reviews_json.keys()[i])
user_specific_reviews = user_reviews_json[bigusers[3]]
user_reviews = []
user_ratings = []
business_ids = []
for review in user_specific_reviews:
user_reviews.append(review['text'])
user_ratings.append(review['stars'])
business_ids.append(review['business_id'])
user_reviews = [review.encode('utf-8').translate(None, string.punctuation) for review in user_reviews]
user_df = pd.DataFrame({'review_text': user_reviews, 'rating': user_ratings, 'biz_id': business_ids})
#Start a connection with the AWS instance and pull in the business reviews database
ip = '54.146.170.140'
conn = MongoClient(ip, 27017)
conn.database_names()
db = conn.get_database('cleaned_data')
reviews = db.get_collection('restaurant_reviews')
restreview = {}
for i in tqdm.tqdm(range(0, len(business_ids))):
rlist = []
for obj in reviews.find({'business_id':business_ids[i]}):
rlist.append(obj)
restreview[business_ids[i]] = rlist
user_id = bigusers[3]  # the current user, whose own reviews are excluded below
rest_reviews = []
rest_ratings = []
biz_ids = []
for i in tqdm.tqdm(range(0, len(restreview.keys()))):
for restaurant in restreview[restreview.keys()[i]]:
if restaurant['user_id'] != user_id:
rest_reviews.append(restaurant['text'])
rest_ratings.append(restaurant['stars'])
biz_ids.append(restreview.keys()[i])
else:
pass
restaurant_df = pd.DataFrame({'review_text': rest_reviews, 'rating': rest_ratings, 'biz_id': biz_ids})
#Feature objects and functions
stop_words = set(stopwords.words('english'))
def sent_percent(review):
regex_words = re.compile('[a-z]+')
words = [x.lower() for x in review.split(' ')]
words = [x for x in words if regex_words.match(x)]
pos_count, neg_count = 0, 0
for word in words:
if word in lh_pos:
pos_count += 1
elif word in lh_neg:
neg_count += 1
return [float(pos_count)/float(len(words)), float(neg_count)/float(len(words))]
pos_vectorizer = CountVectorizer(vocabulary = lh_pos)
neg_vectorizer = CountVectorizer(vocabulary = lh_neg)
class SentimentPercentage(BaseEstimator, TransformerMixin):
    """Transforms a list of review strings into [positive fraction, negative fraction] sentiment features."""
def __init__(self):
pass
def transform(self, reviews):
##Take in a list of textual reviews and return a list with two elements:
##[Positive Percentage, Negative Percentage]
pos_vect = pos_vectorizer.transform(reviews)
neg_vect = neg_vectorizer.transform(reviews)
features = []
for i in range(0, len(reviews)):
sent_percentage = []
sent_percentage.append(float(pos_vect[i].sum())/float(len(reviews[i])))
sent_percentage.append(float(neg_vect[i].sum())/float(len(reviews[i])))
features.append(sent_percentage)
return np.array(features)
def fit(self, reviews, y=None, n_grams = None):
        """Returns `self` unless something different happens in train and test."""
return self
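# Illustrative usage sketch for the transformer above (the review strings are made
# up, and the word lists loaded earlier are assumed available); each row is
# [positive-word hits / review character length, negative-word hits / review character length].
sample_feats = SentimentPercentage().transform(["great tasty food", "awful slow service"])
print "sample sentiment features:", sample_feats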
class TfIdfGramTransformer(BaseEstimator, TransformerMixin):
    """Wraps a TfidfVectorizer so it can be fit and transformed inside a FeatureUnion."""
    def __init__(self):
        pass
    def transform(self, reviews):
        # use the vectorizer fitted in fit(); referencing a global vectorizer here was a bug
        tf_vector = self.vectorizer.transform(reviews)
        return tf_vector
    def fit(self, reviews, y=None, n_grams=(1, 1)):
        # ngram_range must start at 1; (0, 1) is not a valid sklearn ngram_range
        self.vectorizer = TfidfVectorizer(ngram_range=n_grams, stop_words='english')
        self.vectorizer.fit(reviews)
        return self
#Benchmark is simply 50/50 for each prediciton, so let's take a look at the log loss for that case
benchmark_results = [0.5] * len(test_labels)
print "The number to beat is: " + str(log_loss(test_labels, benchmark_results))
#Now, let's do this process iteratively for a larger sub sample of test reviews
#First, start by splitting the restaurants that the user has reviewed into training and test sets
split_samp = .20
test_set = business_ids[0:int(len(business_ids) * split_samp)]
training_set = business_ids[int(len(business_ids) * split_samp): len(business_ids)]
train_reviews, train_ratings = [], []
for rest_id in training_set:
train_reviews.extend(list(user_df[user_df['biz_id'] == rest_id]['review_text']))
train_ratings.extend(list(user_df[user_df['biz_id'] == rest_id]['rating']))
#Transform the star labels into a binary class problem, 0 if rating is < 4 else 1
train_labels = [1 if x >=4 else 0 for x in train_ratings ]
###########################
####LSI Features
###########################
texts = [[word for word in review.lower().split() if (word not in stop_words)]
for review in train_reviews]
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
numpy_matrix = matutils.corpus2dense(corpus, num_terms=50000)
singular_values = np.linalg.svd(numpy_matrix, full_matrices=False, compute_uv=False)
mean_sv = sum(list(singular_values))/len(singular_values)
topics = int(mean_sv)
stop_words = set(stopwords.words('english'))
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=topics)
index = similarities.MatrixSimilarity(lsi[corpus_tfidf], num_features = 10000)
train_lsi = lsi[corpus_tfidf]
train_lsi = [[train[1] for train in train_review] for train_review in train_lsi]
train_lsi = [[0.0000000001] * topics if len(x) != topics else x for x in train_lsi]
train_lsi = sparse.coo_matrix(train_lsi)
train_features = train_lsi
#XGBoost training
#gbm = xgb.XGBClassifier(max_depth=10, n_estimators=400, learning_rate=0.02).fit(train_features, train_labels)
#RandomForest training
rf = RandomForestClassifier()
rf.fit(train_features, train_labels)
#SVM training
svm_classifier = svm.SVC(kernel='linear')
svm_classifier.fit(train_features, train_labels)
error = []
for i in tqdm.tqdm(range(0,len(test_set))):
predicted_rating = 0
#Get reviews for that restaurant
test_reviews =[]
test_reviews.extend(list(restaurant_df[restaurant_df['biz_id'] == test_set[i]]['review_text']))
#Transform features
test_features = comb_features.transform(test_reviews)
#LSI Features
    test_texts = [[word for word in review.lower().split() if (word not in stop_words)]
                  for review in test_reviews]
test_corpus = [dictionary.doc2bow(test) for test in test_texts]
test_tfidf = tfidf[test_corpus]
test_lsi = lsi[test_tfidf]
test_lsi = [[test[1] for test in test_review] for test_review in test_lsi]
test_lsi = [[0.0000000001] * topics if len(x) != topics else x for x in test_lsi]
test_lsi = sparse.coo_matrix(test_lsi)
#stacked_test_features = sparse.hstack((test_features, test_lsi))
stacked_test_features = test_lsi
#Get XGBoost prediction
#test_prediction = gbm.predict(stacked_test_features)
#Get SVM prediction
#test_prediction = svm_classifier.predict(stacked_test_features)
#Get Random Forest prediction
test_prediction = rf.predict(stacked_test_features)
if test_prediction.mean() > 0.7:
predicted_rating = 1
actual_rating = list(user_df[user_df['biz_id'] == test_set[i]]['rating'])[0]
if actual_rating >= 4:
actual_rating = 1
else:
actual_rating = 0
error.append(abs(predicted_rating - actual_rating))
print "The LSA mean absolute error is: " + str(sum(error) / float(len(error)))
#print "The svm (1,1) average mean absolute error is: " + str(sum(svm_error) / float(len(svm_error)))
comb_features = FeatureUnion([('sent_percent',SentimentPercentage()),('tf', TfIdfGramTransformer()),
('lda', Pipeline([('bow', TfidfVectorizer(stop_words='english',ngram_range=(1,1))),
('lda_transform', LatentDirichletAllocation(n_topics=int(mean_sv)))]))
])
comb_features.fit(train_reviews)
train_features = comb_features.transform(train_reviews)
#XGBoost training
#gbm = xgb.XGBClassifier(max_depth=10, n_estimators=400, learning_rate=0.02).fit(train_features, train_labels)
#RandomForest training
rf = RandomForestClassifier(max_depth = 100, max_leaf_nodes=50)
rf.fit(train_features, train_labels)
#SVM training
# svm_classifier = svm.LinearSVC()
# svm_classifier.fit(train_features, train_labels)
test_error = []
for i in tqdm.tqdm(range(0,len(test_set))):
predicted_rating = 0
#Get reviews for that restaurant
test_reviews =[]
test_reviews.extend(list(restaurant_df[restaurant_df['biz_id'] == test_set[i]]['review_text']))
#Transform features
stacked_test_features = comb_features.transform(test_reviews)
#Get XGBoost prediction
#test_prediction = gbm.predict(stacked_test_features)
#Get SVM prediction
#test_prediction = svm_classifier.predict(stacked_test_features)
#Get Random Forest prediction
test_prediction = rf.predict(stacked_test_features)
if test_prediction.mean() > 0.7:
predicted_rating = 1
actual_rating = list(user_df[user_df['biz_id'] == test_set[i]]['rating'])[0]
if actual_rating >= 4:
actual_rating = 1
else:
actual_rating = 0
test_error.append(abs(predicted_rating - actual_rating))
print "The LDA mean absolute error is: " + str(sum(test_error) / float(len(test_error)))
from sklearn.naive_bayes import MultinomialNB
comb_features = FeatureUnion([('sent_percent',SentimentPercentage()),('tf', TfIdfGramTransformer()),
('lda', Pipeline([('bow', TfidfVectorizer(stop_words='english', ngram_range = (1,1))),
('lda_transform', LatentDirichletAllocation(n_topics=500))]))
])
comb_features.fit(train_reviews)
train_features = comb_features.transform(train_reviews)
train_features = sparse.hstack((train_features, train_lsi))
train_features = train_features.todense()
#XGBoost training
#gbm = xgb.XGBClassifier(max_depth=10, n_estimators=500, learning_rate=0.02, ).fit(train_features, train_labels)
#RandomForest training
#rf = RandomForestClassifier()
#rf.fit(train_features, train_labels)
#SVM training
svm_classifier = svm.LinearSVC()
svm_classifier.fit(train_features, train_labels)
#Naive Bayes Training, note Naive Bayes requires a dense matrix for training and testing
#nb_clf = GaussianNB()
#nb_clf.fit(train_features, train_labels)
comb_error = []
test_predictions = []
for i in tqdm.tqdm(range(0,len(test_set))):
predicted_rating = 0
#Get reviews for that restaurant
test_reviews =[]
test_reviews.extend(list(restaurant_df[restaurant_df['biz_id'] == test_set[i]]['review_text']))
#Transform features
test_features = comb_features.transform(test_reviews)
#LSI Features
test_texts = [[word for word in review.lower().split() if (word not in stop_words)]
for review in test_reviews]
test_corpus = [dictionary.doc2bow(test) for test in test_texts]
test_tfidf = tfidf[test_corpus]
test_lsi = lsi[test_tfidf]
test_lsi = [[test[1] for test in test_review] for test_review in test_lsi]
test_lsi = [[0.000000000001] * topics if len(x) != topics else x for x in test_lsi]
test_lsi = sparse.coo_matrix(test_lsi)
stacked_test_features = sparse.hstack((test_features, test_lsi))
stacked_test_features = stacked_test_features.todense()
#Get XGBoost prediction
#test_prediction = gbm.predict(stacked_test_features)
#Get SVM prediction
test_prediction = svm_classifier.predict(stacked_test_features)
#Get Random Forest prediction
#test_prediction = rf.predict(stacked_test_features)
#Get NB prediction
#test_prediction = nb_clf.predict(stacked_test_features)
if test_prediction.mean() > 0.7:
predicted_rating = 1
actual_rating = list(user_df[user_df['biz_id'] == test_set[i]]['rating'])[0]
if actual_rating >= 4:
actual_rating = 1
else:
actual_rating = 0
test_predictions.append((test_prediction, actual_rating))
comb_error.append(abs(predicted_rating - actual_rating))
print "The LDA + LSA mean absolute error is: " + str(sum(comb_error) / float(len(comb_error)))
confidence_tuple = [(float(sum(list(x[0])))/float(len(x[0])),x[1]) for x in test_predictions]
confidence_tuple.sort()
top10 = confidence_tuple[-10:]
print "Got a rec accuracy of: " + str(float(sum([x[1] for x in confidence_tuple[-10:]]))/float(10))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Welcome to Exampville, the best simulated town in this here part of the internet!
Step2: TAZ Shapefile
Step3: Geopandas can open and read this data directly without unzipping it first
Step4: TAZ Employment Data
Step5: Skims
Step6: Households
Step7: Persons
Step8: Tours
|
<ASSISTANT_TASK:>
Python Code:
import larch, numpy, pandas, os, geopandas
larch.__version__
import larch.exampville
larch.exampville.files.shapefile
taz_shape = geopandas.read_file("zip://"+larch.exampville.files.shapefile)
taz_shape.plot(edgecolor='k');
larch.exampville.files.employment
emp = pandas.read_csv(larch.exampville.files.employment, index_col='TAZ')
emp.info()
emp.head()
larch.exampville.files.skims
skims = larch.OMX( larch.exampville.files.skims, mode='r' )
skims
larch.exampville.files.hh
hh = pandas.read_csv( larch.exampville.files.hh )
hh.info()
hh.head()
larch.exampville.files.person
pp = pandas.read_csv( larch.exampville.files.person )
pp.info()
pp.head()
larch.exampville.files.tour
tour = pandas.read_csv( larch.exampville.files.tour )
tour.info()
tour.head()
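# A quick illustrative sketch (column names are assumptions, check taz_shape.columns
# first): join the employment table onto the TAZ shapes for a choropleth.
# taz_emp = taz_shape.merge(emp, left_on='TAZ', right_index=True)
# taz_emp.plot(column=emp.columns[0], legend=True, edgecolor='k')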
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's make a graph 'bout it
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
print(df['gender'].value_counts())
df.groupby('gender')['networthusbillion'].mean()
df.groupby('gender')['sourceofwealth'].value_counts()
df.plot(kind='scatter', x='gender', y='networthusbillion')
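# A scatter plot against a string-valued x column can misbehave in older matplotlib,
# so a per-gender boxplot is a safer view of the same comparison.
df.boxplot(column='networthusbillion', by='gender')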
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example, retrieve all the calibrator with a flux > 0.1 Jy
Step2: Select all calibrators that heve been observed at least in 3 Bands [ >60s in B3, B6, B7]
Step3: We can write a "report file" or only use the "resume data", some will have redshift data retrieved from NED.
Step4: Sometimes there is no redshift information found in NED
Step5: Select objects which has redshift
Step6: Plot Flux vs Redshift
Step7: Plot log(Luminosity) vs redshift
Step9: How to calculate luminosity
Step10: Plot $\log_{10}(L)$ vs $z$
Step11: Black-dashed line are for 0.1 Jy flux.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u

file_listcal = "alma_sourcecat_searchresults_20180419.csv"
q = databaseQuery()
listcal = q.read_calibratorlist(file_listcal, fluxrange=[0.1, 999999])
len(listcal)
print("Name: ", listcal[0][0])
print("J2000 RA, dec: ", listcal[0][1], listcal[0][2])
print("Alias: ", listcal[0][3])
print("Flux density: ", listcal[0][4])
print("Band: ", listcal[0][5])
print("Freq: ", listcal[0][6])
print("Obs date: ", listcal[0][4])
report, resume = q.select_object_from_sqldb("calibrators_brighterthan_0.1Jy_20180419.db", \
maxFreqRes=999999999, array='12m', \
excludeCycle0=True, \
selectPol=False, \
minTimeBand={3:60., 6:60., 7:60.}, \
silent=True)
print("Name: ", resume[0][0])
print("From NED: ")
print("Name: ", resume[0][3])
print("J2000 RA, dec: ", resume[0][4], resume[0][5])
print("z: ", resume[0][6])
print("Total # of projects: ", resume[0][7])
print("Total # of UIDs: ", resume[0][8])
print("Gal lon: ", resume[0][9])
print("Gal lat: ", resume[0][10])
for i, obj in enumerate(resume):
for j, cal in enumerate(listcal):
if obj[0] == cal[0]: # same name
obj.append(cal[4:]) # add [flux, band, flux obsdate] in the "resume"
def collect_z_and_flux(Band):
z = []
flux = []
for idata in resume:
if idata[6] is not None: # select object which has redshift information
fluxnya = idata[11][0]
bandnya = idata[11][1]
freqnya = idata[11][2]
datenya = idata[11][3]
for i, band in enumerate(bandnya):
if band == str(Band): # take only first data
flux.append(fluxnya[i])
z.append(idata[6])
break
return z, flux
z3, f3 = collect_z_and_flux(3)
print("Number of seleted source in B3: ", len(z3))
z6, f6 = collect_z_and_flux(6)
print("Number of seleted source in B6: ", len(z6))
z7, f7 = collect_z_and_flux(7)
print("Number of seleted source in B7: ", len(z7))
plt.figure(figsize=(15,10))
plt.subplot(221)
plt.plot(z3, f3, 'ro')
plt.xlabel("z")
plt.ylabel("Flux density (Jy)")
plt.title("B3")
plt.subplot(222)
plt.plot(z6, f6, 'go')
plt.xlabel("z")
plt.ylabel("Flux density (Jy)")
plt.title("B6")
plt.subplot(223)
plt.plot(z7, f7, 'bo')
plt.xlabel("z")
plt.ylabel("Flux density (Jy)")
plt.title("B7")
plt.subplot(224)
plt.plot(z3, f3, 'ro', z6, f6, 'go', z7, f7, 'bo', alpha=0.3)
plt.xlabel("z")
plt.ylabel("Flux density (Jy)")
plt.title("B3, B6, B7")
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725)
def calc_power(z, flux):
z = redshift
flux in Jy
z = np.array(z)
flux = np.array(flux)
dL = cosmo.luminosity_distance(z).to(u.meter).value # Luminosity distance
luminosity = 4.0*np.pi*dL*dL/(1.0+z) * flux * 1e-26
return z, luminosity
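# Worked example for the formula above: a 0.1 Jy source at z = 1 in this cosmology
# (d_L is roughly 6.6 Gpc there, giving a spectral luminosity of a few 1e26 W/Hz).
_, l_demo = calc_power(np.array([1.0]), np.array([0.1]))
print("L(z=1, S=0.1 Jy) = {:.3e} W/Hz".format(l_demo[0]))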
z3, l3 = calc_power(z3, f3)
z6, l6 = calc_power(z6, f6)
z7, l7 = calc_power(z7, f7)
zdummy = np.linspace(0.001, 2.5, 100)
fdummy = 0.1 # Jy, our cut threshold
zdummy, Ldummy0 = calc_power(zdummy, fdummy)
zdummy, Ldummy3 = calc_power(zdummy, np.max(f3))
zdummy, Ldummy6 = calc_power(zdummy, np.max(f6))
zdummy, Ldummy7 = calc_power(zdummy, np.max(f7))
plt.figure(figsize=(15,10))
plt.subplot(221)
plt.plot(z3, np.log10(l3), 'r*', \
zdummy, np.log10(Ldummy0), 'k--', zdummy, np.log10(Ldummy3), 'r--', alpha=0.5)
plt.xlabel(r"$z$"); plt.ylabel(r"$\log_{10}(L_{\nu_e})$"); plt.title("B3")
plt.subplot(222)
plt.plot(z6, np.log10(l6), 'g*', \
zdummy, np.log10(Ldummy0), 'k--', zdummy, np.log10(Ldummy6), 'g--', alpha=0.5)
plt.xlabel(r"$z$"); plt.ylabel(r"$\log_{10}(L_{\nu_e})$"); plt.title("B6")
plt.subplot(223)
plt.plot(z7, np.log10(l7), 'b*', \
zdummy, np.log10(Ldummy0), 'k--', zdummy, np.log10(Ldummy7), 'b--', alpha=0.5)
plt.xlabel(r"$z$"); plt.ylabel(r"$\log_{10}(L_{\nu_e})$"); plt.title("B7")
plt.subplot(224)
plt.plot(z3, np.log10(l3), 'r*', z6, np.log10(l6), 'g*', z7, np.log10(l7), 'b*', \
zdummy, np.log10(Ldummy0), 'k--', \
zdummy, np.log10(Ldummy3), 'r--', \
zdummy, np.log10(Ldummy6), 'g--', \
zdummy, np.log10(Ldummy7), 'b--', alpha=0.5)
plt.xlabel(r"$z$"); plt.ylabel(r"$\log_{10}(L_{\nu_e})$"); plt.title("B3, B6, B7")
plt.figure(figsize=(15,10))
plt.subplot(221)
plt.plot(z3, l3, 'r*', zdummy, Ldummy0, 'k--', zdummy, Ldummy3, 'r--', alpha=0.5)
plt.xlabel(r"$z$"); plt.ylabel(r"$\log_{10}(L_{\nu_e})$"); plt.title("B3")
plt.subplot(222)
plt.plot(z6, l6, 'g*', zdummy, Ldummy0, 'k--', zdummy, Ldummy6, 'g--', alpha=0.5)
plt.xlabel(r"$z$"); plt.ylabel(r"$\log_{10}(L_{\nu_e})$"); plt.title("B6")
plt.subplot(223)
plt.plot(z7, l7, 'b*', zdummy, Ldummy0, 'k--', zdummy, Ldummy7, 'b--', alpha=0.5)
plt.xlabel(r"$z$"); plt.ylabel(r"$\log_{10}(L_{\nu_e})$"); plt.title("B7")
plt.subplot(224)
plt.plot(z3, l3, 'r*', z6, l6, 'g*', z7, l7, 'b*', \
zdummy, Ldummy0, 'k--', zdummy, Ldummy3, 'r--', \
zdummy, Ldummy6, 'g--', zdummy, Ldummy7, 'b--', alpha=0.5)
plt.xlabel(r"$z$"); plt.ylabel(r"$\log_{10}(L_{\nu_e})$"); plt.title("B3, B6, B7")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading and Reviewing the Data
Step2: Definition of features
Step4: Correlated Feature Check
Step5: The skin and thickness columns are correlated 1 to 1. Dropping the skin column
Step6: Check for additional correlations
Step7: The correlations look good. There appear to be no coorelated columns.
Step8: Change diabetes from boolean to integer, True=1, False=0
Step9: Verify that the diabetes data type has been changed.
Step10: Check for null values
Step11: No obvious null values.
Step12: Good distribution of true and false cases. No special work needed.
Step13: We check to ensure we have the desired 70% train, 30% test split of the data
Step14: Verifying predicted value was split correctly
Step15: Post-split Data Preparation
Step16: Are these 0 values possible?
Step17: Impute with the mean
Step18: Training Initial Algorithm - Naive Bayes
Step19: Performance on Training Data
Step20: Performance on Testing Data
Step21: Metrics
Step22: Random Forest
Step23: Predict Training Data
Step24: Predict Test Data
Step25: Logistic Regression
Step26: Setting regularization parameter
Step27: Logisitic regression with class_weight='balanced'
Step28: LogisticRegressionCV
Step29: Predict on Test data
Step30: Using your trained Model
Step31: Load trained model from file
Step32: Test Prediction on data
Step33: The truncated file contained 4 rows from the original CSV.
Step34: We need to drop the diabetes column since that is what we are predicting.
Step35: Data has 0 in places it should not.
Step36: At this point our data is ready to be used for prediction.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd # pandas is a dataframe library
import matplotlib.pyplot as plt # matplotlib.pyplot plots data
%matplotlib inline
df = pd.read_csv("./data/pima-data.csv")
df.shape
df.head(5)
df.tail(5)
df.isnull().values.any()
def plot_corr(df, size=11):
    """Function plots a graphical correlation matrix for each pair of columns in the dataframe.
    Input:
        df: pandas DataFrame
        size: vertical and horizontal size of the plot
    Displays:
        matrix of correlation between columns. Blue-cyan-yellow-red-darkred => less to more correlated
        0 ------------------> 1
        Expect a darkred line running from top left to bottom right
    """
corr = df.corr() # data frame correlation function
fig, ax = plt.subplots(figsize=(size, size))
ax.matshow(corr) # color code the rectangles by correlation value
plt.xticks(range(len(corr.columns)), corr.columns) # draw x tick marks
plt.yticks(range(len(corr.columns)), corr.columns) # draw y tick marks
plot_corr(df)
df.corr()
df.head(5)
del df['skin']
df.head(5)
plot_corr(df)
df.head(5)
diabetes_map = {True : 1, False : 0}
df['diabetes'] = df['diabetes'].map(diabetes_map)
df.head(5)
df.isnull().values.any()
num_obs = len(df)
num_true = len(df.loc[df['diabetes'] == 1])
num_false = len(df.loc[df['diabetes'] == 0])
print("Number of True cases: {0} ({1:2.2f}%)".format(num_true, (num_true/num_obs) * 100))
print("Number of False cases: {0} ({1:2.2f}%)".format(num_false, (num_false/num_obs) * 100))
from sklearn.cross_validation import train_test_split
feature_col_names = ['num_preg', 'glucose_conc', 'diastolic_bp', 'thickness', 'insulin', 'bmi', 'diab_pred', 'age']
predicted_class_names = ['diabetes']
X = df[feature_col_names].values # predictor feature columns (8 X m)
y = df[predicted_class_names].values # predicted class (1=true, 0=false) column (1 X m)
split_test_size = 0.30
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split_test_size, random_state=42)
# test_size = 0.3 is 30%, 42 is the answer to everything
print("{0:0.2f}% in training set".format((len(X_train)/len(df.index)) * 100))
print("{0:0.2f}% in test set".format((len(X_test)/len(df.index)) * 100))
print("Original True : {0} ({1:0.2f}%)".format(len(df.loc[df['diabetes'] == 1]), (len(df.loc[df['diabetes'] == 1])/len(df.index)) * 100.0))
print("Original False : {0} ({1:0.2f}%)".format(len(df.loc[df['diabetes'] == 0]), (len(df.loc[df['diabetes'] == 0])/len(df.index)) * 100.0))
print("")
print("Training True : {0} ({1:0.2f}%)".format(len(y_train[y_train[:] == 1]), (len(y_train[y_train[:] == 1])/len(y_train) * 100.0)))
print("Training False : {0} ({1:0.2f}%)".format(len(y_train[y_train[:] == 0]), (len(y_train[y_train[:] == 0])/len(y_train) * 100.0)))
print("")
print("Test True : {0} ({1:0.2f}%)".format(len(y_test[y_test[:] == 1]), (len(y_test[y_test[:] == 1])/len(y_test) * 100.0)))
print("Test False : {0} ({1:0.2f}%)".format(len(y_test[y_test[:] == 0]), (len(y_test[y_test[:] == 0])/len(y_test) * 100.0)))
df.head()
print("# rows in dataframe {0}".format(len(df)))
print("# rows missing glucose_conc: {0}".format(len(df.loc[df['glucose_conc'] == 0])))
print("# rows missing diastolic_bp: {0}".format(len(df.loc[df['diastolic_bp'] == 0])))
print("# rows missing thickness: {0}".format(len(df.loc[df['thickness'] == 0])))
print("# rows missing insulin: {0}".format(len(df.loc[df['insulin'] == 0])))
print("# rows missing bmi: {0}".format(len(df.loc[df['bmi'] == 0])))
print("# rows missing diab_pred: {0}".format(len(df.loc[df['diab_pred'] == 0])))
print("# rows missing age: {0}".format(len(df.loc[df['age'] == 0])))
from sklearn.preprocessing import Imputer
#Impute with mean all 0 readings
fill_0 = Imputer(missing_values=0, strategy="mean", axis=0)
X_train = fill_0.fit_transform(X_train)
X_test = fill_0.fit_transform(X_test)
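# Note: fit_transform on X_test recomputes the imputation means from the test data,
# which leaks test statistics. A minimal sketch of the usual alternative:
# fill_0.fit(X_train)
# X_train = fill_0.transform(X_train)
# X_test = fill_0.transform(X_test)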
from sklearn.naive_bayes import GaussianNB
# create Gaussian Naive Bayes model object and train it with the data
nb_model = GaussianNB()
nb_model.fit(X_train, y_train.ravel())
# predict values using the training data
nb_predict_train = nb_model.predict(X_train)
# import the performance metrics library
from sklearn import metrics
# Accuracy
print("Accuracy: {0:.4f}".format(metrics.accuracy_score(y_train, nb_predict_train)))
print()
# predict values using the testing data
nb_predict_test = nb_model.predict(X_test)
from sklearn import metrics
# training metrics
print("Accuracy: {0:.4f}".format(metrics.accuracy_score(y_test, nb_predict_test)))
print("Confusion Matrix")
print("{0}".format(metrics.confusion_matrix(y_test, nb_predict_test)))
print("")
print("Classification Report")
print(metrics.classification_report(y_test, nb_predict_test))
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier(random_state=42) # Create random forest object
rf_model.fit(X_train, y_train.ravel())
rf_predict_train = rf_model.predict(X_train)
# training metrics
print("Accuracy: {0:.4f}".format(metrics.accuracy_score(y_train, rf_predict_train)))
rf_predict_test = rf_model.predict(X_test)
# training metrics
print("Accuracy: {0:.4f}".format(metrics.accuracy_score(y_test, rf_predict_test)))
print(metrics.confusion_matrix(y_test, rf_predict_test) )
print("")
print("Classification Report")
print(metrics.classification_report(y_test, rf_predict_test))
from sklearn.linear_model import LogisticRegression
lr_model =LogisticRegression(C=0.7, random_state=42)
lr_model.fit(X_train, y_train.ravel())
lr_predict_test = lr_model.predict(X_test)
# training metrics
print("Accuracy: {0:.4f}".format(metrics.accuracy_score(y_test, lr_predict_test)))
print(metrics.confusion_matrix(y_test, lr_predict_test) )
print("")
print("Classification Report")
print(metrics.classification_report(y_test, lr_predict_test))
C_start = 0.1
C_end = 5
C_inc = 0.1
C_values, recall_scores = [], []
C_val = C_start
best_recall_score = 0
while (C_val < C_end):
C_values.append(C_val)
lr_model_loop = LogisticRegression(C=C_val, random_state=42)
lr_model_loop.fit(X_train, y_train.ravel())
lr_predict_loop_test = lr_model_loop.predict(X_test)
recall_score = metrics.recall_score(y_test, lr_predict_loop_test)
recall_scores.append(recall_score)
if (recall_score > best_recall_score):
best_recall_score = recall_score
best_lr_predict_test = lr_predict_loop_test
C_val = C_val + C_inc
best_score_C_val = C_values[recall_scores.index(best_recall_score)]
print("1st max value of {0:.3f} occured at C={1:.3f}".format(best_recall_score, best_score_C_val))
%matplotlib inline
plt.plot(C_values, recall_scores, "-")
plt.xlabel("C value")
plt.ylabel("recall score")
C_start = 0.1
C_end = 5
C_inc = 0.1
C_values, recall_scores = [], []
C_val = C_start
best_recall_score = 0
while (C_val < C_end):
C_values.append(C_val)
lr_model_loop = LogisticRegression(C=C_val, class_weight="balanced", random_state=42)
lr_model_loop.fit(X_train, y_train.ravel())
lr_predict_loop_test = lr_model_loop.predict(X_test)
recall_score = metrics.recall_score(y_test, lr_predict_loop_test)
recall_scores.append(recall_score)
if (recall_score > best_recall_score):
best_recall_score = recall_score
best_lr_predict_test = lr_predict_loop_test
C_val = C_val + C_inc
best_score_C_val = C_values[recall_scores.index(best_recall_score)]
print("1st max value of {0:.3f} occured at C={1:.3f}".format(best_recall_score, best_score_C_val))
%matplotlib inline
plt.plot(C_values, recall_scores, "-")
plt.xlabel("C value")
plt.ylabel("recall score")
from sklearn.linear_model import LogisticRegression
lr_model =LogisticRegression( class_weight="balanced", C=best_score_C_val, random_state=42)
lr_model.fit(X_train, y_train.ravel())
lr_predict_test = lr_model.predict(X_test)
# training metrics
print("Accuracy: {0:.4f}".format(metrics.accuracy_score(y_test, lr_predict_test)))
print(metrics.confusion_matrix(y_test, lr_predict_test) )
print("")
print("Classification Report")
print(metrics.classification_report(y_test, lr_predict_test))
print(metrics.recall_score(y_test, lr_predict_test))
from sklearn.linear_model import LogisticRegressionCV
lr_cv_model = LogisticRegressionCV(n_jobs=-1, random_state=42, Cs=3, cv=10, refit=False, class_weight="balanced") # set number of jobs to -1 which uses all cores to parallelize
lr_cv_model.fit(X_train, y_train.ravel())
lr_cv_predict_test = lr_cv_model.predict(X_test)
# training metrics
print("Accuracy: {0:.4f}".format(metrics.accuracy_score(y_test, lr_cv_predict_test)))
print(metrics.confusion_matrix(y_test, lr_cv_predict_test) )
print("")
print("Classification Report")
print(metrics.classification_report(y_test, lr_cv_predict_test))
from sklearn.externals import joblib
joblib.dump(lr_cv_model, "./data/pima-trained-model.pkl")
lr_cv_model = joblib.load("./data/pima-trained-model.pkl")
# get data from truncated pima data file
df_predict = pd.read_csv("./data/pima-data-trunc.csv")
print(df_predict.shape)
df_predict
del df_predict['skin']
df_predict
X_predict = df_predict
del X_predict['diabetes']
#Impute with mean all 0 readings
fill_0 = Imputer(missing_values=0, strategy="mean", axis=0)
X_predict = fill_0.fit_transform(X_predict)
lr_cv_model.predict(X_predict)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reformat into a TensorFlow-friendly shape
Step2: Let's build a small network with two convolutional layers, followed by one fully connected layer. Convolutional networks are more expensive computationally, so we'll limit its depth and number of fully connected nodes.
Step3: Problem 1
Step4: Output
|
<ASSISTANT_TASK:>
Python Code:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
pickle_file = '../notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
image_size = 28
num_labels = 10
num_channels = 1 # grayscale
import numpy as np
def reformat(dataset, labels):
dataset = dataset.reshape(
(-1, image_size, image_size, num_channels)).astype(np.float32)
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
batch_size = 16
patch_size = 5
depth = 16
num_hidden = 64
graph = tf.Graph()
with graph.as_default():
# Input data.
tf_train_dataset = tf.placeholder(
tf.float32, shape=(batch_size, image_size, image_size, num_channels))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
layer1_weights = tf.Variable(tf.truncated_normal(
[patch_size, patch_size, num_channels, depth], stddev=0.1))
layer1_biases = tf.Variable(tf.zeros([depth]))
layer2_weights = tf.Variable(tf.truncated_normal(
[patch_size, patch_size, depth, depth], stddev=0.1))
layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))
layer3_weights = tf.Variable(tf.truncated_normal(
[image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1))
layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
layer4_weights = tf.Variable(tf.truncated_normal(
[num_hidden, num_labels], stddev=0.1))
layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))
# Model.
def model(data): #data of shape [batch_size, image_size, image_size, num_channels]
conv = tf.nn.conv2d(data, layer1_weights, [1, 2, 2, 1], padding='SAME') # shape of [batch_size, image_size/2, image_size/2, depth]
hidden = tf.nn.relu(conv + layer1_biases)
conv = tf.nn.conv2d(hidden, layer2_weights, [1, 2, 2, 1], padding='SAME')# shape of [batch_size, image_size/4, image_size/4, depth]
hidden = tf.nn.relu(conv + layer2_biases)
shape = hidden.get_shape().as_list()
reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])# shape of [batch_size, image_size/4 * image_size/4* depth]
hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases) # shape of [batch_size,num_hidden]
return tf.matmul(hidden, layer4_weights) + layer4_biases # shape of [batch_size,num_labels]
# Training computation.
logits = model(tf_train_dataset)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
test_prediction = tf.nn.softmax(model(tf_test_dataset))
num_steps = 1001
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print('Initialized')
for step in range(num_steps):
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
batch_labels = train_labels[offset:(offset + batch_size), :]
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 50 == 0):
print('Minibatch loss at step %d: %f' % (step, l))
print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
# Variables.
layer3_weights = tf.Variable(tf.truncated_normal(
[image_size // 2 * image_size // 2 * num_channels, num_hidden], stddev=0.1))
layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
layer4_weights = tf.Variable(tf.truncated_normal(
[num_hidden, num_labels], stddev=0.1))
layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))
# Model.
def model(data):
# Data is shaped of [batch_size, image_size, image_size, num_channels]
hidden = tf.nn.max_pool(data, [1, 2, 2, 1],[1, 2, 2, 1] , padding='SAME') #same shape of [batch_size, image_size/2, image_size/2, num_channels]
shape = hidden.get_shape().as_list()
reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]]) #reshaped into 2D array of [batch_size, image_size/2* image_size/2 * num_channels]
hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
return tf.matmul(hidden, layer4_weights) + layer4_biases
import math

# Variables.
layer1_weights = tf.Variable(tf.truncated_normal(
[patch_size, patch_size, num_channels, depth], stddev=0.1))
layer1_biases = tf.Variable(tf.zeros([depth]))
layer2_weights = tf.Variable(tf.truncated_normal(
[patch_size, patch_size, depth, depth], stddev=0.1))
layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))
layer3_weights = tf.Variable(tf.truncated_normal(
[math.ceil(image_size / 16) * math.ceil(image_size /16) * depth, num_hidden], stddev=0.1))
layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
layer4_weights = tf.Variable(tf.truncated_normal(
[num_hidden, num_labels], stddev=0.1))
layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))
# Model.
def model(data):
conv = tf.nn.conv2d(data, layer1_weights, [1, 2, 2, 1], padding='SAME') # shape of [batch_size, image_size/2, image_size/2, depth]: [16, 14, 14, 16]
shape1 = conv.get_shape().as_list()
hidden = tf.nn.relu(conv + layer1_biases)
hidden1 = tf.nn.max_pool(hidden, [1, 2, 2, 1], [1, 2, 2, 1] , padding='SAME') #shape of [batch_size, image_size/4, image_size/4, depth]: [16, 7, 7, 16]
shape2 = hidden1.get_shape().as_list()
conv = tf.nn.conv2d(hidden1, layer2_weights, [1, 2, 2, 1], padding='SAME') #shape of [batch_size, image_size/8, image_size/8, depth]: [16, 4, 4, 16]
shape3 = conv.get_shape().as_list()
hidden = tf.nn.relu(conv + layer2_biases)
hidden2 = tf.nn.max_pool(hidden, [1, 2, 2, 1],[1, 2, 2, 1] , padding='SAME') #same shape of [batch_size, image_size/16, image_size/16, depth]: [16, 2, 2, 16]
shape = hidden2.get_shape().as_list()
reshape = tf.reshape(hidden2, [shape[0], shape[1] * shape[2] * shape[3]])
hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
return tf.matmul(hidden, layer4_weights) + layer4_biases
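# A minimal sketch in the direction of the follow-up problem (assumptions: TF1-style
# APIs, hyperparameter values are illustrative): exponential learning-rate decay,
# with dropout to be applied inside model() during training only, e.g.
#   hidden = tf.nn.dropout(hidden, 0.75)
with graph.as_default():
    global_step = tf.Variable(0, trainable=False)  # counts optimizer steps
    decayed_lr = tf.train.exponential_decay(0.05, global_step,
                                            decay_steps=1000, decay_rate=0.86,
                                            staircase=True)
    decayed_optimizer = tf.train.GradientDescentOptimizer(decayed_lr).minimize(
        loss, global_step=global_step)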
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Description
Step2: SOLUTION
|
<ASSISTANT_TASK:>
Python Code:
%pylab notebook
p = 12
n_m = 600 # [r/min]
n_pulses = 3*p*n_m
print('''
n_pulses = {:.0f} pulses/min = {:.0f} pulses/sec
============================================'''.format(n_pulses, n_pulses/60))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
from scipy import sparse
c1 = sparse.csr_matrix([[0, 0, 1, 0], [2, 0, 0, 0], [0, 0, 0, 0]])
c2 = sparse.csr_matrix([[0, 3, 4, 0], [0, 0, 0, 5], [6, 7, 0, 8]])
Feature = sparse.hstack((c1, c2)).tocsr()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Build a scikit-learn compatible classifier
Step2: Evaluate its accuracy
Step3: Try a few off-the-shelf machine learning models
Step5: Confusion Matrix
Step6: Find the best parameters with a grid search
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import cross_val_score
from IPython.display import Image
import pydotplus
# Load the data
iris = datasets.load_iris()
# Take a look at the data
print("Iris Data")
df = pd.DataFrame(iris.data)
df['target'] = iris.target
print("data = ", iris.feature_names)
print("target = ", iris.target_names)
df
# Plot the data
plt.clf()
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(11, 10))
axarr[0, 0].scatter(iris.data[:, 0], iris.data[:, 1], c=iris.target, alpha=0.6)
axarr[0, 0].set_title(iris.feature_names[0] + " vs " + iris.feature_names[1])
axarr[0, 1].scatter(iris.data[:, 1], iris.data[:, 2], c=iris.target, alpha=0.6)
axarr[0, 1].set_title(iris.feature_names[1] + " vs " + iris.feature_names[2])
axarr[1, 0].scatter(iris.data[:, 0], iris.data[:, 3], c=iris.target, alpha=0.6)
axarr[1, 0].set_title(iris.feature_names[0] + " vs " + iris.feature_names[3])
axarr[1, 1].scatter(iris.data[:, 2], iris.data[:, 3], c=iris.target, alpha=0.6)
axarr[1, 1].set_title(iris.feature_names[2] + " vs " + iris.feature_names[3])
plt.show()
from sklearn.base import BaseEstimator, ClassifierMixin
class IrisClassifier(BaseEstimator, ClassifierMixin):
def __init__(self):
pass
def fit(self, x, y):
return self
def predict_proba(self, x_list):
return [self.predict_proba_sample(x) for x in x_list]
def predict(self, x_list):
proba = self.predict_proba(x_list)
most_probable_category = np.argmax(proba, axis=1)
return most_probable_category
def predict_proba_sample(self, x):
# e.g. x = [5.1 3.5 1.4 0.2] should be classified as 0 ([1, 0, 0])
FILL HERE
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
## Evaluation
### Use only two of the features for simplicity
X = iris.data[:, [0, 1]]
y = iris.target
clf = IrisClassifier()
clf.fit(X, y)
# Make predictions
predicted = clf.predict(X)
# Accuracy
print("Accuracy = ", clf.score(X, y))
# Use only two of the features for simplicity
X = iris.data[:, [0, 1]]
y = iris.target
# Create a classifier instance: SVM
svm = SVC(kernel='rbf', probability=True)
# Train it
svm.fit(X, y)
# Accuracy on the training data
print("Score = {0}".format(svm.score(X, y)))
# Predict the first 10 samples
for x in X[:10]:
print("Predict f(%s) = %s" % (x, svm.predict([x])))
# Other classifiers
classifiers = [
SVC(kernel='rbf', probability=True),
DecisionTreeClassifier(max_depth=2),
KNeighborsClassifier(n_neighbors=1),
IrisClassifier(),
]
# All of them can be used through the same interface
for classifier in classifiers:
classifier.fit(X, y)
# Plotting decision regions
plt.clf()
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.05), np.arange(y_min, y_max, 0.05))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(11, 10))
for index, classifier, title in zip([0, 1, 2, 3], classifiers, ['Kernel SVM', 'Decision Tree (depth=2)', 'KNN (k=1)', 'IrisClassifier']):
predicted = classifier.predict(np.c_[xx.ravel(), yy.ravel()])
predicted = predicted.reshape(xx.shape)
# print(predicted)
axarr[index // 2, index % 2].contourf(xx, yy, predicted, alpha=0.3)
axarr[index // 2, index % 2].scatter(X[:, 0], X[:, 1], c=y, alpha=0.6)
axarr[index // 2, index % 2].set_title("%s (%f)" % (title, classifier.score(X, y)))
plt.show()
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import itertools
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
from sklearn.metrics import confusion_matrix
matrix = confusion_matrix(y, clf.predict(X))
plot_confusion_matrix(matrix, ['A', 'B', 'C'], normalize=True)
## Cross validation
for classifier, title in zip(classifiers, ['Kernel SVM', 'Decision Tree (depth=2)', 'KNN (k=1)', 'Iris Classifier']):
scores = cross_val_score(classifier, X, y, cv=6)
print("Cross Validation Score of %s" % title)
print("mean(%s) = %s" % (scores, scores.mean()))
## Grid search over the parameters
from sklearn.model_selection import GridSearchCV
search_params = [{
'criterion': ['gini', 'entropy'],
'max_depth': [1],
FILL HERE
}]
tuned_clf = GridSearchCV(DecisionTreeClassifier(random_state=1), search_params, cv=6)
tuned_clf.fit(iris.data, iris.target)
print("Best Score %s " % tuned_clf.best_score_)
print("Best Params %s " % tuned_clf.best_params_)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'uhh', 'sandbox-2', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
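# Example of the expected form (hypothetical name/email, illustration only):
# DOC.set_author("Jane Doe", "jane.doe@example.org")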
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
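# Example using one of the valid choices listed above (illustrative only):
# DOC.set_value("primitive equations")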
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
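# Example of a numeric value (illustrative only):
# DOC.set_value(47)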
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
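# Example of a boolean value (illustrative only):
# DOC.set_value(True)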
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Linear regression will obviously be a bad fit.
Step2: Let us transform it into a 3-degree polynomial fit and perform the same linear regression.
Step3: Pipeline
Step4: Validation and Hyperparameters tuning
Step5: Grid Search
Step6: This lets us see the parameters corresponding to the quantities to fit
Step7: We can then get the best parameters and the corresponding model.
Step8: We notice that the grid search based on cross validation helped discard overfitted models (as they were bad on validation sets).
Step9: The vectorizer has registered the feature names and output a sparse matrix that can be converted to a DataFrame.
Step10: The tf-idf vectorizer works the same way.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
#import seaborn as sns
#sns.set()
N = 100 #points to generate
X = np.sort(10*np.random.rand(N, 1)**0.8 , axis=0) #abscisses
y = 4 + 0.4*np.random.rand(N) - 1. / (X.ravel() + 0.5)**2 - 1. / (10.5 - X.ravel() ) # some complicated function
plt.scatter(X,y)
from sklearn.linear_model import LinearRegression
model = LinearRegression().fit(X, y)
yfit = model.predict(X)
plt.scatter(X, y)
plt.plot(X, yfit,color='r',label="linear regression")
plt.legend()
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(degree=3, include_bias=False) # 3 degree without degree 0 (no constant)
XPoly = poly.fit_transform(X)
print(XPoly[:5,])
modelPoly = LinearRegression().fit(XPoly, y)
yfitPoly = modelPoly.predict(XPoly)
plt.scatter(X, y)
plt.plot(X, yfit,color='r',label="linear regression")
plt.plot(X, yfitPoly,color='k',label="Polynomial regression (deg 3)")
plt.legend(loc = 'lower right')
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
polyFeat = PolynomialFeatures(degree=3, include_bias=False)
linReg = LinearRegression()
polyReg = Pipeline([ ('polyFeat',polyFeat) , ('linReg',linReg) ])
polyReg.fit(X, y) # X original not XPoly
yfitPolyNew = polyReg.predict(X)
plt.scatter(X, y)
plt.plot(X, yfit,color='r',label="linear regression")
plt.plot(X, yfitPolyNew,color='k',label="Polynomial regression (deg 3)")
plt.legend(loc = 'lower right')
from sklearn.model_selection import cross_val_score
cv_score = cross_val_score(polyReg, X, y, cv=5, scoring="neg_mean_absolute_error") # 5 groups cross validation
print(cv_score)
print("Mean score:" , np.mean(cv_score))
polyReg.get_params()
param_grid = [
{'polyFeat__degree': np.arange(1,12),
'linReg__fit_intercept': [True,False],
'polyFeat__include_bias': [True,False]}]
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(polyReg, param_grid, cv=5)
grid.fit(X, y)
grid.best_params_
best_model = grid.best_estimator_.fit(X,y)
overfit = polyReg.set_params(polyFeat__degree=15).fit(X,y)
Xplot = np.linspace(-1,10.5,100).reshape(-1, 1)
yBest = best_model.predict(Xplot)
yOver = overfit.predict(Xplot)
plt.scatter(X, y)
plt.plot(Xplot, yBest , 'r' ,label="Best polynomial")
plt.plot(Xplot, yOver , 'k' , label="overfitted (deg 15)")
plt.legend(loc = 'lower right')
plt.ylim([0,5])
plt.title("Best and overfitted models")
f = open('./data/poems/poe-raven.txt', 'r')
poe = f.read().replace('\n',' ').replace('.','').replace(',','').replace('-','')
poe
from sklearn.feature_extraction.text import CountVectorizer
vec = CountVectorizer()
X = vec.fit_transform([poe])
X
import pandas as pd
pd.DataFrame(X.toarray(), columns=vec.get_feature_names())
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer()
X = vec.fit_transform([poe])
pd.DataFrame(X.toarray(), columns=vec.get_feature_names())
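# Illustrative follow-up (not in the original): show the ten terms with the
# highest tf-idf weight in the poem.
weights = pd.Series(X.toarray()[0], index=vec.get_feature_names())
print(weights.sort_values(ascending=False).head(10))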
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configuring stuff for visualizations
Step2: Playing Peace War Game with 14 Players for 650,000 iterations
Step3: Grabbing scores of each agent
Step4: Converting current score number range to smaller range maintaining ratio
Step5: Displaying Scores and Summary of Game
Step6: (The Interactive Map may not be rendered on Github)
|
<ASSISTANT_TASK:>
Python Code:
import gmaps, os # Used for interactive visualizations
from game_types import NPlayerGame
import tensorflow as tf
import pandas as pd
gmaps.configure(api_key=os.environ["GOOGLE_API_KEY"])
locs = [
[(37.760851, -122.443118), (37.760853, -122.443120)], # Silcon Valley
[(40.092034, -88.238687), (40.092035, -88.238688)], # Urbana
[(25.777052, -80.194957), (25.777054, -80.194959)], # Flordia
[(40.705773, -74.010861), (40.705774, -74.010863)], # Manhattan
[(35.898512, -78.865059), (35.898513, -78.865060)], # NC
[(42.278052, -83.738997), (42.278053, -83.738998)], # Michigan
[(35.844058, -106.287484), (35.844059, -106.287485)], # New Mexico
[(33.745074, -84.390840), (33.745076, -84.390842)], # Georgia
[(32.758009, -96.805532), (32.758011, -96.805534)], # Texas
[(47.653022, -122.305569), (47.653024, -122.305571)], # Washington
[(47.653532, -100.347697), (47.653533, -100.347698)], # ND
[(34.069110, -118.246972), (34.069112, -118.246974)], # Sol Cal
[(44.723362, -111.071472), (44.723363, -111.071473)] # WY
]
names = ['Silcon Valley', 'Illinois', 'Flordia', 'Manhattan', 'North Carolina',
'Michigan', 'New Mexico', 'Georgia', 'Texas', 'Washington', 'North Dakota', 'Sol Cal', 'Wyoming']
tf.reset_default_graph()
game = NPlayerGame(n_players=14) # Create the agents (random types) for a 14-player game
game.play(650000) # Play 650,000 iterations
agent_name, agent_score = [], []
for agent in game.data:
if agent != 'id':
agent_name.append(agent)
agent_score.append(sum(game.data[agent]))
old_range = max(agent_score) - min(agent_score)
new_range = 35
new_agent_scores = []
for old_val in agent_score:
new_agent_scores.append( (((old_val-min(agent_score))*new_range)/old_range) + 10 )
#print(old_range)
layers = []
fig = gmaps.Map()
#print('Agent Name \t\t\t| Location \t\t\t| Total Score')
for i, loc in enumerate(locs):
#print('{0} \t\t\t| {1} \t\t\t| \t\t{2}'.format(agent_name[i], names[i], new_agent_scores[i]))
_layer = gmaps.heatmap_layer(loc, point_radius=int(new_agent_scores[i]))
fig.add_layer(_layer)
d ={'Agent Name': agent_name, 'Location': names, 'Total Score': agent_score, 'Radius Size': new_agent_scores}
fig
pd.DataFrame(d)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
numerator = 98
denominator = 42
gcd = np.gcd(numerator, denominator)
result = (numerator//gcd, denominator//gcd)
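# Quick check (illustrative): gcd(98, 42) == 14, so result == (7, 3),
# i.e. 98/42 reduces to 7/3.
print(result)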
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Below I'm plotting an example image from the MNIST dataset. These are 28x28 grayscale images of handwritten digits.
Step2: We'll train an autoencoder with these images by flattening them into 784 length vectors. The images from this dataset are already normalized such that the values are between 0 and 1. Let's start by building basically the simplest autoencoder with a single ReLU hidden layer. This layer will be used as the compressed representation. Then, the encoder is the input layer and the hidden layer. The decoder is the hidden layer and the output layer. Since the images are normalized between 0 and 1, we need to use a sigmoid activation on the output layer to get values matching the input.
Step3: Training
Step4: Here I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss.
Step5: Checking out the results
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
# Size of the encoding layer (the hidden layer)
encoding_dim = 16 # feel free to change this value
img_shape = mnist.train.images.shape[1]
# Input and target placeholders
inputs_ = tf.placeholder(tf.float32, (None, img_shape))
targets_ = tf.placeholder(tf.float32, (None, img_shape))
# Output of hidden layer, single fully connected layer here with ReLU activation
encoded = tf.layers.dense(inputs_, encoding_dim, activation=tf.nn.relu)
# Output layer logits, fully connected layer with no activation
logits = tf.layers.dense(encoded, img_shape)
# Sigmoid output from logits
decoded = tf.nn.sigmoid(logits)
# Sigmoid cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
# Mean of the loss
cost = tf.reduce_mean(loss)
# Adam optimizer
opt = tf.train.AdamOptimizer().minimize(cost)
# Create the session
sess = tf.Session()
epochs = 20
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
feed = {inputs_: batch[0], targets_: batch[0]}
batch_cost, _ = sess.run([cost, opt], feed_dict=feed)
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed, compressed = sess.run([decoded, encoded], feed_dict={inputs_: in_imgs})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
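# Sanity check (a quick sketch): the compressed codes for these 10 test images
# should have shape (10, encoding_dim), i.e. 784 pixels squeezed to 16 numbers.
print(compressed.shape)  # expected: (10, 16)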
sess.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mpi-m', 'sandbox-1', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
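# Example of filling in a multi-valued property (an illustration only —
# substitute the approximations your model actually makes, drawn from the
# valid choices listed above):
# DOC.set_value("Primitive equations")
# DOC.set_value("Boussinesq")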
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Python accepts the previous because of dynamic typing (C would throw an error!)
Step2: The previous line is going to cause a PyIntObject to be created. It is going to hold the value of the object, along with other details Python needs under the hood (the type of the object, the number of references to the object, etc.). For those more versed in programming, 'x' is being created on the stack and the PyIntObject with the value of three is being created on the heap. The stack holds your local variables and is managed by your program, whereas the heap holds dynamically created data and is actually managed by the OS.
Step3: Now a PyFloatObject with value of 4.5 will be created, then 'x' will point to that object.
Step4: The 'is' operator returns True only if both references point to the same object. We expect False here, and indeed it is — even though 'x == y' is True, because '==' compares values, not identities
Step5: This creates a PyStringObject. Here are some string methods
Step6: The method above doesn't change the string permanently. To do that, we need to assign the result back to the variable
Step7: 2.2.5. Python
Step8: Python doesn't use curly braces to delimit blocks; it uses indentation. The range function takes the following arguments
Step9: 2.2.7. Python
Step10: It is possible to make the same as before, but using conditionals
Step11: The '%' operator computes the modulo, i.e., the remainder of a division. Now imagine we want the following output
Step12: If you want many 'else if' statement, you can use the 'elif' in Python
Step13: 2.2.9. Python
Step14: In a compiled language, the compiler would have caught the type mismatch and we wouldn't have been allowed to pass a string to a function expecting a numeric argument. But in Python, it doesn't come up until run time. We get an error by trying to run this.
Step15: We might think that this would throw an error, because the 'print_abs' function doesn't return anything, but in fact it returns 'None'.
Step16: 2.2.10. Python
Step17: Print is not the same as return. Function 2 isn't returning the absolute value, it is just printing it.
Step18: The swap function does swap the values within the function, it doesn't change what 'x' and 'y' points to.
Step19: The variable 'val' was declared in the scope of the function 'my_abs'. It doesn't live outside that!
Step20: Now we have a variable that lives outside the function. By declaring 'val' at the top of the file, we've made it a global variable. Be cautious
Step21: Concatenation
Step22: Replication
Step23: Strip(s[, chars])
Step24: split(s[, sep[, maxsplit]])
Step25: Slicing
Step26: Substring testing
Step27: find(sub[, start [, end]])
Step28: Convert to number
Step29: String formatting
Step30: 2.3.3. Lists in Python
Step31: | 11 | 22 | 33 |
Step32: 2.3.4. Quiz
Step33: 2.3.6. Tuples in Python
Step34: Immutability is important for 2 reasons
Step35: Dictionaries are unordered!
Step36: 2.3.8. List and Dictionary Comprehension
Step37: Let's now make a list with these values
Step38: 2.3.9. Sets in Python
Step39: 2.3.10. Python Word Count
Step40: 2.4. Unix
|
<ASSISTANT_TASK:>
Python Code:
x = 3
x = 4.5
x = 3
x = 4.5
x = 3
y = 3.0
x is y
x == y
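# id() exposes what 'is' compares (a quick sketch): two distinct objects
# have two distinct identities, even when their values compare equal.
print(id(x), id(y))    # different identities, so 'x is y' is False
print(x is y, x == y)  # False True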
x = 'Hello'
x = 'Hello'
x.lower()
x
x = 'Hello'
x = x.lower()
x
for i in range(0, 10):
print(i)
i = 2
while i < 12:
print(i)
i += 3
for i in range(0, 10, 2):
print(i)
for i in range(0, 10):
if i % 2 == 0:
print(i)
for i in range(0, 5):
if i % 2 == 0:
print(i)
else:
print(i + 10)
for i in range(0, 5):
if i % 2 == 0:
print(i)
elif i % 3 == 1:
print(i + 10)
else:
print(i - 10)
def my_abs(val):
if val < 0:
return 0 - val
return val
print(my_abs(-7))
print(my_abs('Hi'))
def print_abs(val):
if val < 0:
print(0 - val)
else:
print(val)
x = print_abs(-2.7)
print(x)
def inc_val(val):
val = val + 1
x = 7
inc_val(x)
print(x)
# Function 1:
def my_abs(val):
if val < 0:
return 0 - val
return val
# Function 2:
def my_abs(val):
if val < 0:
print(0 - val)
else:
print(val)
def swap(val1, val2):
tmp = val1
val1 = val2
val2 = tmp
x = 6
y = 3
swap(x, y)
print(x,", ",y)
def my_abs(val):
if val < 0:
return 0 - val
return val
print(val)
val = 0
def my_abs(val):
if val < 0:
return 0 - val
return val
print(val)
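# To rebind a global from inside a function, Python requires the 'global'
# statement — a sketch of why globals deserve caution:
def reset_val():
    global val
    val = -1
reset_val()
print(val)  # -1: the function silently changed module-level state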
# All characters to lower case:
'Hello World!'.lower()
# All characters to upper case:
'Hello World!'.upper()
'1' + '2'
'Hello ' + 'World' + '!'
'Spam' * 5
'Spam' * 3 + 'Eggs' * 2
' Extras \n'.strip()
'****10*****'.strip('*')
'Let\'s split the words'.split(' ')
'Jane,Doe,Cars,5'.split(',')
word = 'Hello'
word[1:3] # 1 inclusive to 3 exclusive
word[4:7]
word[-4:-1]
word = 'Hello'
'HE' in word
'He' in word
word.find('el')
word = '1234'
int(word)
float(word)
word = 'Hello'
int(word)
statement = 'We love {} {}.' # {} are placeholders
statement.format('data', 'analysis')
statement = 'We love {0} {1}.' # you can number the {}
statement.format('data', 'analysis')
statement = 'We love {1} {0}.'
statement.format('analysis', 'data')
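# Placeholders can also be named, which reads better with many arguments:
statement = 'We love {noun1} {noun2}.'
statement.format(noun1='data', noun2='analysis')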
list1 = [11, 22, 33]
list1
list1[1] # slicing the list
list1[3] # there is no third element, we get an error
# iterate over a list using Python-like syntax:
for i in list1:
print(i)
# iterate over a list using 'C'-like syntax:
for i in range(0, len(list1)):
print(list1[i])
# Lists are MUTABLE:
list1 = [11, 22, 33]
list1[1] = 95
list1
# Appending to a list:
list1 = [11, 22, 33]
list1.append(44)
list1
# Deleting from a list:
list1 = [11, 22, 33, 44]
list1.pop(2) # by the index
list1
list1 = [11, 22 , 33, 44]
list1.remove(33) # by the value
list1
# Adding a List to a List: extend
list1 = [1, 2, 3]
list2 = [4, 5, 6]
list1.extend(list2)
list1
# Extend vs Append:
list1 = [1, 2, 3]
list1.append(list2)
list1
# Zipping Lists:
list1 = [1, 2, 3]
list2 = [4, 5, 6]
for x, y in zip(list1, list2):
print(x, ',', y)
# Quiz:
x = [10,20,30]
y = x # y is always pointing to the x list!
x[1] = 42 # y is still 'pointing' to the x list
print(y)
# If we want a new copy of the list x:
x = [10, 20, 30]
y = list(x) # y now 'points' to a new object
x[1] = 42
print(y)
tuple1 = ('Honda', 'Civic', 4, 2017)
tuple1
# We can index the tuple (the same as a list):
tuple1[1]
# Length of the tuple:
len(tuple1)
# Iterating over a tuple:
for i in tuple1:
print(i)
tuple1[3] = 2018
# Create a dictionary:
dict1 = {('Ghostbusters', 2016): 5.4,
('Ghostbusters', 1984): 7.8}
dict1
# Dictionary slicing:
dict1[('Ghostbusters', 2016)]
# Length of the dictionary
len(dict1)
# Add a new key to the dictionary and its value:
dict1[('Cars', 2006)] = 7.1
dict1
# Get a value back from a selected key:
dict1 = {('Ghostbusters', 2016): 5.4,
('Ghostbusters', 1984): 7.8,
('Cars', 2006): 7.1}
x = dict1[('Cars', 2006)]
x
# Ask for a key not in the dictionary:
y = dict1[('Toy Story', 1995)]
y
# Safer way to get a value from a dictionary:
dict1 = {('Ghostbusters', 2016): 5.4,
('Ghostbusters', 1984): 7.8,
('Cars', 2006): 7.1}
x = dict1.get(('Cars', 2006))
x
# Safer way with non-existing key:
x = dict1.get(('Toy Story', 1995))
x == None
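# get() also accepts a default, so a missing key never silently becomes None:
dict1.get(('Toy Story', 1995), 'no rating')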
('Toy Story', 1995) in dict1
# Deleting from a dictionary:
dict1 = {('Ghostbusters', 2016): 5.4,
('Ghostbusters', 1984): 7.8,
('Cars', 2006): 7.1}
dict1.pop(('Ghostbusters', 2016)) # we get the value deleted!
dict1 # no longer exists
dict1 = {('Ghostbusters', 2016): 5.4,
('Ghostbusters', 1984): 7.8,
('Cars', 2006): 7.1}
del dict1[('Cars', 2006)] # we doesn't get the value back
dict1 # the deleted key no longer exists
# Iterating over a dictionary:
dict1 = {('Ghostbusters', 2016): 5.4,
('Ghostbusters', 1984): 7.8,
('Cars', 2006): 7.1}
for i in dict1:
# print the keys
print(i)
for key, value in dict1.items():
# print keys and values
print(key, ':', value)
# Be CAREFUL while iterating:
for i in dict1:
# deleting items while iterating raises "RuntimeError: dictionary changed size during iteration"
dict1.pop(i)
# Selective removal:
dict1 = {('Ghostbusters', 2016): 5.4,
('Ghostbusters', 1984): 7.8,
('Cars', 2006): 7.1}
to_remove = [] # created an empty list
for i in dict1:
# iterate over dict, append to the to_remove if met the criteria
if (i[1] < 2000):
to_remove.append(i)
for i in to_remove:
# iterate over to_remove, pop from the dict
dict1.pop(i)
dict1
list1 = [i ** 2 for i in range(1, 11)]
list1
list1 = [i for i in range(0, 6)]
list1
# All even values from 0 to 20:
list1 = [i for i in range(0, 20, 2)]
list1
# List with alternate value 0 and 1:
list1 = [i % 2 for i in range(0, 10)]
list1
# List with 10 random integers between 0 and 5:
import random
list1 = [random.randint(0, 5) for i in range(0, 10)]
list1
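# Comprehensions can also filter with an 'if' clause (a quick sketch):
list1 = [i ** 2 for i in range(0, 10) if i % 2 == 0]  # squares of even numbers
list1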
# Dictionary comprehension:
dict1 = {i: i ** 2 for i in range(1, 11)}
dict1
# Dictionary with values from A to Z and numeric keys:
dict1 = {i: chr(i) for i in range(65, 91)}  # stop at 91 so 'Z' (chr(90)) is included
dict1
# Create a set:
leos_colors = set(['blue', 'green', 'red'])
leos_colors
# Add a new item:
leos_colors.add('yellow')
leos_colors
# Add an existing value to a set:
leos_colors.add('blue')
leos_colors
# Remove items: Discard
# if you try to discard an item which doesn't exist, it does nothing
leos_colors = set(['blue', 'green', 'red'])
leos_colors.discard('green')
leos_colors.discard('orange')
leos_colors
# Remove items: Remove
# if you try to remove an item which doesn't exist, it throws an error!
leos_colors = set(['blue', 'green', 'red'])
leos_colors.remove('orange')
# Set operations: Union
leos_colors = set(['blue', 'green', 'red'])
ilkays_colors= set(['blue', 'yellow'])
either = ilkays_colors.union(leos_colors)
either
# Set operations: Intersection
leos_colors = set(['blue', 'green', 'red'])
ilkays_colors = set(['blue', 'yellow'])
both = ilkays_colors.intersection(leos_colors)
both
# Set quick operators:
leos_colors & ilkays_colors
leos_colors | ilkays_colors
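# Set operations: Difference
# colors Leo has that Ilkay doesn't
leos_colors - ilkays_colors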
# Be sure you have followed the instructions to download the 98-0.txt,
# the text of A Tale of Two Cities, by Charles Dickens
import collections
file = open('/home/jayme/Courses/Python 4 DS/word_cloud/98-0.txt')
# if you want to use stopwords, here's an example of how to do this
stopwords = set(line.strip() for line in open('/home/jayme/Courses/Python 4 DS/word_cloud/stopwords'))
# create your data structure here. F
wordcount={}
# Instantiate a dictionary, and for every word in the file, add to
# the dictionary if it doesn't exist. If it does, increase the count.
# Hint: To eliminate duplicates, remember to split by punctuation,
# and use case demiliters. The functions lower() and split() will be useful!
for word in file.read().lower().split():
word = word.replace(".","")
word = word.replace(",","")
word = word.replace("\"","")
word = word.replace("“","")
if word not in stopwords:
if word not in wordcount:
wordcount[word] = 1
else:
wordcount[word] += 1
# after building your wordcount, you can then sort it and return the first
# n words. If you want, collections.Counter may be useful.
d = collections.Counter(wordcount)
#print(d.most_common(10))
for word, count in d.most_common(10):
print(word, ": ", count)
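# A more compact punctuation strip (a sketch using Python 3's str.translate;
# the curly quotes must be added by hand, since string.punctuation only
# covers ASCII):
import string
table = str.maketrans('', '', string.punctuation + '“”')
print('“recalled,”'.translate(table))  # -> recalled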
tup1 = ('physics', 'chemistry', 1997, 2000, 2001, 1999)
tup1[1:4]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1.- CROP DATABASE FOR THE BAJÍO AMAZÓNICO (AMAZONIAN LOWLANDS)
Step2: We build a consolidated per-stratum summary to get an overview
Step3: 2.- Boundaries of the chacra (field plot)
Step4: This corresponds to a half-hectare chacra
Step5: The points of this grid can later be converted to latitude/longitude so they can be located with a GPS.
Step6: The algorithm that places the plants iterates over the list of species, from the tallest to the smallest, trying in order a list of rules that must always be satisfied
Step7: Rule 1
Step8: Rule 2
Step10: Rule 3
Step11: We can see how many times each rule discarded a point, and take that as an indicator of how strict the rule is.
Step12: Some plants may have ended up excluded because they could not be placed satisfactorily.
Step14: 4.- Plot the map
|
<ASSISTANT_TASK:>
Python Code:
# Libraries used to build the map
%matplotlib inline
import matplotlib.pyplot as plt
from descartes import PolygonPatch
import matplotlib.cm as cmx
import matplotlib.colors as colors
import matplotlib.colorbar as colorbar
from shapely import geometry
from shapely import ops
import numpy as np
import pandas as pd
import random
import mpld3
from tqdm import tqdm
import time
# To produce a video
from moviepy import editor as mpy
from moviepy.video.io.bindings import mplfig_to_npimage
from copy import deepcopy
# Emulate Python 3 (we had to revert to Python 2 because of MoviePy)
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import six
# Enable the vega extension for inline plotting:
# jupyter nbextension install --py vega --user
# jupyter nbextension enable vega --user --py
plt.ioff()
especies = pd.read_csv("db_final.csv") ## The species database
especies = especies[(especies.Usos=="Medicinal")]
especies = especies[(especies.ind>0)]
especies = especies.sort_values(by=['Altura.tot'], ascending=False)
# Possible filters for testing
#especies = especies.head(8)
#especies = especies.tail(6)
especies
consolidado = pd.DataFrame({'especies':especies.groupby('Estrato').size(),
'individuos':especies.groupby('Estrato')['ind'].sum()})
pd.concat([consolidado,pd.DataFrame(consolidado.sum(axis=0),columns=['Total']).T])
chacra = geometry.Polygon([ [0,0],
[0, 93],
[55, 93],
[55, 86],
[35, 65],
[37.5, 47.5],
[55, 44],
[65, 52.5],
[65, 0] ])
chacra
print (chacra.area, "m²")
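# A sketch of the grid-to-GPS conversion mentioned above. The metric grid is
# ASSUMED to sit in UTM zone 18S (EPSG:32718, plausible for the Peruvian
# Amazon) at a hypothetical origin; replace both with the real survey datum
# before using this in the field.
from pyproj import Transformer
origin_e, origin_n = 500000, 8700000  # hypothetical UTM coordinates of (0, 0)
to_wgs84 = Transformer.from_crs("EPSG:32718", "EPSG:4326")
for x, y in [(0, 0), (65, 0), (0, 93)]:
    lat, lon = to_wgs84.transform(origin_e + x, origin_n + y)
    print(round(lat, 6), round(lon, 6))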
class Poblacion:
def __init__(self, poligono, especies, make_frame=None):
self.poligono = poligono
self.especies = especies
# Create a table to store the individuals
individuos_columns = ['id', 'pos', 'x', 'y', 'color', 'Diametro.dosel',
'Nombre', 'Estrato', 'Diametro.punto', 'Altura.tot']
self.individuos = pd.DataFrame( columns=individuos_columns, )
self.n_individuos = 0
# Also store the individuals we could not place (if any)
self.excluidos = pd.DataFrame( columns=individuos_columns, )
self.inubicables = 0
self.exclusiones = []
colores = get_colormap(len(especies))
i=0
for especie in tqdm(list(especies.itertuples())):
i=i+1
n_especie = especie[0]
color_especie = colores(i)
planta_tipo = especies.loc[n_especie]
for n_individuo in range(especies.ind[n_especie]):
self.n_individuos = self.n_individuos + 1
planta = planta_tipo.copy()
planta['id'] = n_especie
#### Algorithm to place the plant ###
intentos = 10
while intentos:
intentos = intentos - 1
distancia_min = 1 # 1m
# Pick a random point inside the polygon
(minx, miny, maxx, maxy) = self.poligono.bounds
while True:
p = geometry.Point(random.uniform(minx, maxx), random.uniform(miny, maxy))
if poligono.contains(p):
pos = p
break
planta['pos'] = pos
planta['x'], planta['y'] = pos.xy
try:
regla_1(self, planta)
regla_2(self, planta)
regla_3(self, planta)
except ValueError as e:
self.exclusiones.append(str(e))
self.inubicables = self.inubicables + 1
planta['pos'] = None
next
else:
if make_frame:
make_frame(self)
break
########################## End of placement algorithm
if planta['pos']:
planta['color'] = color_especie
planta['Diametro.dosel'] = float(planta['Diametro.dosel'])
planta['Diametro.punto'] = float(planta['Diametro.dosel']*10)
self.individuos.loc[self.n_individuos] = planta
else:
self.excluidos.loc[self.inubicables] = planta
self.individuos = self.individuos.dropna()
self.individuos = self.individuos.sort_values(by=['Altura.tot'], ascending=False)
if self.inubicables:
print ("No se pudieron ubicar " + str(self.inubicables) + " individuos.")
#self.inubicables = pd.DataFrame(self.inubicables)
def regla_1(poblacion, planta):
if planta.Estrato < 3:
if planta.pos.distance(poblacion.poligono.exterior) < planta['Altura.tot'] * 0.3:
raise ValueError('regla1')
else:
if planta.pos.distance(poblacion.poligono.exterior) < planta['Altura.tot'] * 0.3:
raise ValueError('regla1')
def regla_2(poblacion, planta):
for anterior in poblacion.individuos.itertuples():
if planta.Estrato < 3:
distancia_min = (planta.Distancia / 2 ) - (planta.Distancia / 2) * planta.Sombra * 2
else:
distancia_min = planta.Distancia / 2
if planta.pos.distance(anterior[2]) < distancia_min:
raise ValueError('regla2')
def regla_3(poblacion, planta):
mi_estrato = int(especies.Estrato[planta.id])
mas_cercanos = []
for anterior in poblacion.individuos.itertuples():
if not mas_cercanos:
mas_cercanos = [anterior]
for cercano in mas_cercanos:
if planta.pos.distance(anterior[2]) < planta.pos.distance(cercano[2]):
mas_cercanos.append(anterior)
if len(mas_cercanos) == 3:
mas_cercanos.pop(0)
for cercano in mas_cercanos:
if abs(especies.Estrato[int(cercano[1])] - mi_estrato) > 1:
raise ValueError('regla3')
def get_colormap(N):
color_norm = colors.Normalize(vmin=0, vmax=N-1)
scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='terrain')
def map_index_to_rgb_color(index):
return scalar_map.to_rgba(index)
return map_index_to_rgb_color
# http://stackoverflow.com/questions/14720331/how-to-generate-random-colors-in-matplotlib
class Figure:
def __init__(self, poblacion=None):
self.colores = None
self.poligono = None
self.fig = plt.figure(figsize=(7, 7))
self.ax = self.fig.add_subplot(111)
self.ax.set_ylim(-10,100)
self.ax.set_xlim(-20,90)
self.ax.grid(color='gray', alpha=0.9)
if poblacion:
self.plot_poblacion(poblacion)
# Colorbar
# def createColourbar(lwr, upr):
# Create a colourbar with limits of lwr and upr
# cax, kw = colorbar.make_axes(plt.gca())
# norm = colors.Normalize(vmin = lwr, vmax = upr, clip = False)
# c = colorbar.ColorbarBase(cax, cmap=plt.cm.get_cmap('terrain_r'), norm=norm)
# return c
#cb = createColourbar(0, len(pob.especies)-1)
#cb.set_label('ESPECIES', labelpad=-50, y=0.45)
#cb.set_ticks(list(reversed(range(0, len(pob.especies)-1))))
#cb.set_ticklabels(list(pob.especies.Nombre))
#try:
# cb.autoscale()
# self.fig.colorbar(cb)
#except TypeError:
# pass
def plot_poblacion(self, poblacion):
colormap = cmx.ScalarMappable(
colors.Normalize(1, len(especies)),
plt.cm.get_cmap('terrain'))
if not self.colores:
cm = plt.cm.get_cmap('terrain')
colores = [list(cm(x)) for x in range(len(poblacion.especies))]
self.colores = list(reversed(colores))
if not self.poligono:
self.ax.plot( *poblacion.poligono.exterior.xy )
# Circles and points represent each individual
circulos = []
colores = []
puntos_x = []
puntos_y = []
labels = dict()
for planta in poblacion.individuos.iterrows():
circulos.append(planta[1]['pos'].buffer(planta[1]['Diametro.dosel']))
colores.append(planta[1]['color'])
puntos_x.append(planta[1]['x'])
puntos_y.append(planta[1]['y'])
labels[planta[1]['id']]=planta[1]['Nombre']
i=0
for poligono in circulos:
patch = PolygonPatch(poligono, fc=colores[i-1], alpha=0.2, zorder=1)
self.ax.add_patch(patch)
i = i+1
self.ax.scatter(puntos_x,
puntos_y,
c=colores,
s=20,
cmap="terrain",
vmin=0,
vmax=len(poblacion.especies)-1,
alpha=0.6, marker="+")
starttime = time.time()
frames = {}
def save_frame(poblacion):
f = Figure(poblacion)
t = time.time() - starttime
img_array = mplfig_to_npimage(f.fig)
frames[t] = img_array
del(f)
# Takes about 3 minutes
pob = Poblacion(chacra, especies, save_frame)
seconds = sorted(frames.keys())[-1] - sorted(frames.keys())[-0]
def make_frame(t):
    # Return the saved frame whose timestamp (offset by the first keyframe
    # time) is nearest to t, so the video replays the keyframes in order.
    keyframes = sorted(frames.keys())
    t_min = min(keyframes)
    this_frame = frames[min(keyframes, key=lambda x: abs(x - t - t_min))]
    return this_frame
anim = mpy.VideoClip(make_frame, duration=seconds)
anim.write_videofile("vis_algoritmo.mp4", fps=50)
print ("Se ubicaron %s de %s individuos." %
( len(pob.individuos), especies.ind.sum(axis=0)))
exclusiones = pd.DataFrame({'regla':pob.exclusiones})
pd.DataFrame({'causes of exclusion': exclusiones.groupby('regla').size()})
pd.DataFrame({'excluded': pob.excluidos.groupby('Nombre').size()})
f = Figure(pob)
#from vega import VegaLite
VegaLite({
"mark": "point",
"encoding": {
"y": {"type": "quantitative","field": "y"},
"x": {"type": "quantitative","field": "x"}
}
}, pob.individuos)
# Interactive plot (does not work on GitHub)
# mpld3.display(fig)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, we load the data from the previous run. Check what folder was created! If you ran the simulation code multiple times, each time a new folder will be created in sequential order (data0, data1, data2, ...). This tct.qd.load function hides some of the data loading logic that was described at the end of fullspace_qd_run.ipynb.
Step2: It can be nice to make some figures while the simulation is still running. For long running, large simulations, it's expensive to reload all the data, so load_new_files() allows updating the data object with any new time steps that have been completed and saved. By default, results are saved in 100 time step chunks. Look in the data0 folder to see.
Step3: Create the plotting object. This processes the data a bit to make fields like slip and velocity easier to plot.
Step4: The summary() function makes four useful plots that show the overall evolution of the fault
Step5: The qdp.V_info function provides the necessary values, levels, contour levels, and colormap to the qdp.nicefig function to make a handy figure of the state of the x component of slip rate at time step 99.
Step6: Let's make a whole bunch of this same figure and turn them into a video. We'll make a figure every 4th step and name the final video qd_video. This should create a qd_video0.mp4 file. Enjoy!
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import tectosaur as tct
import tectosaur.qd
import tectosaur.qd.plotting
tct.qd.configure(
gpu_idx = 0, # Which GPU to use if there are multiple. Best to leave as 0.
fast_plot = True, # Let's make fast, inexpensive figures. Set to false for higher resolution plots with latex fonts.
)
plt.style.use('default')
folder_name = 'data0'
data = tct.qd.load(folder_name, tct.qd.FullspaceModel)
data.load_new_files()
qdp = tct.qd.plotting.QDPlotData(data)
qdp.summary()
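# Peek at how many saved time steps are available before picking one to
# plot; n_steps is the same attribute the video loop below iterates over.
print(qdp.n_steps)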
qdp.nicefig(*qdp.V_info(99), dim = [0,2])
video_name = qdp.qd_video(range(1, qdp.n_steps, 4), qdp.V_info, video_prefix = 'qd_video', dim = [0,2])
tct.qd.plotting.make_mp4(video_name)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Quantization aware training in Keras example
Step2: Train a model for MNIST without quantization aware training
Step3: Clone and fine-tune pre-trained model with quantization aware training
Step4: Train and evaluate the model against baseline
Step5: For this example, there is minimal to no loss in test accuracy after quantization aware training, compared to the baseline.
Step6: Create quantized model for TFLite backend
Step7: See persistence of accuracy from TF to TFLite
Step8: You evaluate the quantized model and see that the accuracy from TensorFlow persists to the TFLite backend.
Step9: See 4x smaller model from quantization
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
! pip install -q tensorflow
! pip install -q tensorflow-model-optimization
import tempfile
import os
import tensorflow as tf
from tensorflow import keras
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define the model architecture.
model = keras.Sequential([
keras.layers.InputLayer(input_shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10)
])
# Train the digit classification model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
epochs=1,
validation_split=0.1,
)
import tensorflow_model_optimization as tfmot
quantize_model = tfmot.quantization.keras.quantize_model
# q_aware stands for for quantization aware.
q_aware_model = quantize_model(model)
# `quantize_model` requires a recompile.
q_aware_model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
q_aware_model.summary()
train_images_subset = train_images[0:1000] # out of 60000
train_labels_subset = train_labels[0:1000]
q_aware_model.fit(train_images_subset, train_labels_subset,
batch_size=500, epochs=1, validation_split=0.1)
_, baseline_model_accuracy = model.evaluate(
test_images, test_labels, verbose=0)
_, q_aware_model_accuracy = q_aware_model.evaluate(
test_images, test_labels, verbose=0)
print('Baseline test accuracy:', baseline_model_accuracy)
print('Quant test accuracy:', q_aware_model_accuracy)
converter = tf.lite.TFLiteConverter.from_keras_model(q_aware_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
quantized_tflite_model = converter.convert()
import numpy as np
def evaluate_model(interpreter):
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
# Run predictions on every image in the "test" dataset.
prediction_digits = []
for i, test_image in enumerate(test_images):
if i % 1000 == 0:
print('Evaluated on {n} results so far.'.format(n=i))
# Pre-processing: add batch dimension and convert to float32 to match with
# the model's input data format.
test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
interpreter.set_tensor(input_index, test_image)
# Run inference.
interpreter.invoke()
# Post-processing: remove batch dimension and find the digit with highest
# probability.
output = interpreter.tensor(output_index)
digit = np.argmax(output()[0])
prediction_digits.append(digit)
print('\n')
# Compare prediction results with ground truth labels to calculate accuracy.
prediction_digits = np.array(prediction_digits)
accuracy = (prediction_digits == test_labels).mean()
return accuracy
interpreter = tf.lite.Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
test_accuracy = evaluate_model(interpreter)
print('Quant TFLite test_accuracy:', test_accuracy)
print('Quant TF test accuracy:', q_aware_model_accuracy)
# Create float TFLite model.
float_converter = tf.lite.TFLiteConverter.from_keras_model(model)
float_tflite_model = float_converter.convert()
# Measure sizes of models.
_, float_file = tempfile.mkstemp('.tflite')
_, quant_file = tempfile.mkstemp('.tflite')
with open(quant_file, 'wb') as f:
f.write(quantized_tflite_model)
with open(float_file, 'wb') as f:
f.write(float_tflite_model)
print("Float model in Mb:", os.path.getsize(float_file) / float(2**20))
print("Quantized model in Mb:", os.path.getsize(quant_file) / float(2**20))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sigmoid
Step2: When plotted on a range of -5,5, this gives the following shape.
Step3: Tanh
Step4: ReLU
Step5: It is probably worth noting that the leaky ReLU is a closely related function; the only difference is that values < 0 are not completely set to 0 but are instead multiplied by 0.01 (a short sketch appears after the ReLU plot in the code below).
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
def activation_sigmoid(x, derivative):
sigmoid_value = 1/(1+np.exp(-x))
if not derivative:
return sigmoid_value
else:
return sigmoid_value*(1-sigmoid_value)
x_values = np.arange(-5, 6, 0.1)
y_sigmoid = activation_sigmoid(x_values, derivative=False)
plt.plot(x_values, y_sigmoid)
def activation_tanh(x, derivative):
tanh_value = np.tanh(x)
if not derivative:
return tanh_value
else:
return 1-tanh_value**2
y_tanh = activation_tanh(x_values, derivative = False)
plt.plot(x_values, y_tanh)
def relu_activation(x, derivative):
if not derivative:
return x * (x>0)
else:
x[x <= 0] = 0
x[x > 0] = 1
return x
y_relu = relu_activation(x_values, derivative=False)
plt.plot(x_values, y_relu)
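# As the text notes, leaky ReLU differs from ReLU only in that negative
# values are multiplied by 0.01 instead of being zeroed out. A minimal
# sketch for comparison:
def leaky_relu_activation(x, derivative):
    if not derivative:
        return np.where(x > 0, x, 0.01 * x)
    else:
        return np.where(x > 0, 1.0, 0.01)

y_leaky = leaky_relu_activation(x_values, derivative=False)
plt.plot(x_values, y_leaky)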
def softmax_activation(x):
exponent = np.exp(x - np.max(x))
softmax_value = exponent/np.sum(exponent, axis = 0)
return softmax_value
y_softmax = softmax_activation(x_values)
plt.plot(x_values, y_softmax)
print("The sum of all softmax probabilities can be confirmed as " + str(np.sum(y_softmax)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We define the function prep_datas (props to Alexandre), already used the previous week. However, now we reshape the images from a 32x32 matrix (this value seems unnecessary; however, the bigger the image, the worse the classifiers work) to a flat 1024 vector, a constraint given by the Random Forest classifier.
Step2: Then we load the training and the test set
Step3: We define the Random Forest Classifier and the Support Vector Classifier and train them through the fit function. Taking a linear kernel for SVC gives the best results for this classifier.
Step4: Let's test how well the system is doing
Step5: Now we load the real set of images and test it. This part of the program has been taken from Alexandre's program from last week. First we load the 'real world' images
Step6: Then we make the predictions with both classifiers
Step7: Finally we plot the results
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import os
from skimage.transform import resize
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
import tools as im
from matplotlib import pyplot as plt
%matplotlib inline
path=os.getcwd()+'/' # finds the path of the folder in which the notebook is
path_train=path+'images/train/'
path_test=path+'images/test/'
path_real=path+'images/real_world/'
def prep_datas(xset,xlabels):
X=list(xset)
for i in range(len(X)):
X[i]=resize(X[i],(32,32,1)) # reduce the size of the image from 100X100 to 32X32. Also flattens the color levels
X[i]=np.reshape(X[i],1024) # reshape from 32x32 to a flat 1024 vector
X=np.array(X) # transforms it into an array
Y=np.asarray(xlabels) # transforms from list to array
return X,Y
training_set, training_labels = im.load_images(path_train)
X_train, Y_train = prep_datas(training_set,training_labels)
test_set, test_labels = im.load_images(path_test)
X_test,Y_test=prep_datas(test_set,test_labels)
classifierForest = RandomForestClassifier(n_estimators=1000)
classifierSVC=svm.SVC(kernel='linear')
classifierForest.fit(X_train, Y_train)
classifierSVC.fit(X_train,Y_train)
expectedF = Y_test
predictedF = classifierForest.predict(X_test)
predictedS = classifierSVC.predict(X_test)
print(expectedF)
print(predictedF)
print(predictedS)
real_world_set=[]
for i in np.arange(1,73):
filename=path+'images/real_world/'+str(i)+'.png'
real_world_set.append(im.deshear(filename))
fake_label=np.ones(len(real_world_set),dtype='int32')
X_real,Y_real=prep_datas(real_world_set,fake_label)
y_predF = classifierForest.predict(X_real)
y_predS = classifierSVC.predict(X_real)
f=open(path+'images/real_world/labels.txt',"r")
lines=f.readlines()
result=[]
for x in lines:
result.append((x.split(' ')[1]).replace('\n',''))
f.close()
result=np.array([int(x) for x in result])
result[result>1]=1
plt.plot(y_predF,'o')
plt.plot(1.2*y_predS,'o')
plt.plot(2*result,'o')
plt.ylim(-0.5,2.5);
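# A simple accuracy check of both classifiers against the ground-truth
# labels loaded above (binary labels, so a plain mean of matches works):
print('Random Forest accuracy: {:.2f}'.format(np.mean(y_predF == result)))
print('SVC accuracy:           {:.2f}'.format(np.mean(y_predS == result)))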
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Analysing our corpus.
Step4: Preprocessing our data. Remember
Step5: Finalising our dictionary and corpus
Step6: Topic modeling with LSI
Step7: Topic modeling with HDP
Step8: Topic modeling using LDA
Step9: pyLDAvis is a great way to visualize an LDA model. To summarize in short, the area of the circles represent the prevelance of the topic. The length of the bars on the right represent the membership of a term in a particular topic. pyLDAvis is based on this paper.
Step11: Finding out the optimal number of topics
Step13: LDA as LSI
Step14: Inference
Step16: Evaluating all the topic models
Step17: Customizing the topic coherence measure
Step18: To get topics out of the topic model
Step19: Step 1
Step20: Step 2
Step21: Step 3
Step22: Step 4
|
<ASSISTANT_TASK:>
Python Code:
import os
import re
import operator
import matplotlib.pyplot as plt
import warnings
import gensim
import numpy as np
warnings.filterwarnings('ignore') # Let's not pay heed to them right now
from gensim.models import CoherenceModel, LdaModel, LsiModel, HdpModel
from gensim.models.wrappers import LdaMallet
from gensim.corpora import Dictionary
from pprint import pprint
%matplotlib inline
test_data_dir = '{}'.format(os.sep).join([gensim.__path__[0], 'test', 'test_data'])
lee_train_file = test_data_dir + os.sep + 'lee_background.cor'
with open(lee_train_file) as f:
for n, l in enumerate(f):
if n < 5:
print([l])
def build_texts(fname):
    """Function to build tokenized texts from file
    Parameters:
    ----------
    fname: File to be read
    Returns:
    -------
    yields preprocessed line
    """
with open(fname) as f:
for line in f:
yield gensim.utils.simple_preprocess(line, deacc=True, min_len=3)
train_texts = list(build_texts(lee_train_file))
len(train_texts)
bigram = gensim.models.Phrases(train_texts) # for bigram collocation detection
bigram[['new', 'york', 'example']]
from gensim.utils import lemmatize
from nltk.corpus import stopwords
stops = set(stopwords.words('english')) # nltk stopwords list
def process_texts(texts):
    """Function to process texts. Following are the steps we take:
    1. Stopword Removal.
    2. Collocation detection.
    3. Lemmatization (not stem since stemming can reduce the interpretability).
    Parameters:
    ----------
    texts: Tokenized texts.
    Returns:
    -------
    texts: Pre-processed tokenized texts.
    """
texts = [[word for word in line if word not in stops] for line in texts]
texts = [bigram[line] for line in texts]
texts = [[word.split('/')[0] for word in lemmatize(' '.join(line), allowed_tags=re.compile('(NN)'), min_length=3)] for line in texts]
return texts
train_texts = process_texts(train_texts)
train_texts[5:6]
dictionary = Dictionary(train_texts)
corpus = [dictionary.doc2bow(text) for text in train_texts]
lsimodel = LsiModel(corpus=corpus, num_topics=10, id2word=dictionary)
lsimodel.show_topics(num_topics=5) # Showing only the top 5 topics
lsitopics = lsimodel.show_topics(formatted=False)
hdpmodel = HdpModel(corpus=corpus, id2word=dictionary)
hdpmodel.show_topics()
hdptopics = hdpmodel.show_topics(formatted=False)
ldamodel = LdaModel(corpus=corpus, num_topics=10, id2word=dictionary)
import pyLDAvis.gensim
pyLDAvis.enable_notebook()
pyLDAvis.gensim.prepare(ldamodel, corpus, dictionary)
ldatopics = ldamodel.show_topics(formatted=False)
def evaluate_graph(dictionary, corpus, texts, limit):
    """Function to display num_topics - LDA graph using c_v coherence
    Parameters:
    ----------
    dictionary : Gensim dictionary
    corpus : Gensim corpus
    limit : topic limit
    Returns:
    -------
    lm_list : List of LDA topic models
    c_v : Coherence values corresponding to the LDA model with respective number of topics
    """
c_v = []
lm_list = []
for num_topics in range(1, limit):
lm = LdaModel(corpus=corpus, num_topics=num_topics, id2word=dictionary)
lm_list.append(lm)
cm = CoherenceModel(model=lm, texts=texts, dictionary=dictionary, coherence='c_v')
c_v.append(cm.get_coherence())
# Show graph
x = range(1, limit)
plt.plot(x, c_v)
plt.xlabel("num_topics")
plt.ylabel("Coherence score")
plt.legend(("c_v"), loc='best')
plt.show()
return lm_list, c_v
%%time
lmlist, c_v = evaluate_graph(dictionary=dictionary, corpus=corpus, texts=train_texts, limit=10)
pyLDAvis.gensim.prepare(lmlist[2], corpus, dictionary)
lmtopics = lmlist[5].show_topics(formatted=False)
def ret_top_model():
    """Since LDAmodel is a probabilistic model, it comes up with different topics each time we run it. To control
    the quality of the topic model we produce, we can see what the interpretability of the best topic is and keep
    evaluating the topic model until this threshold is crossed.
    Returns:
    -------
    lm: Final evaluated topic model
    top_topics: ranked topics in decreasing order. List of tuples
    """
top_topics = [(0, 0)]
while top_topics[0][1] < 0.97:
lm = LdaModel(corpus=corpus, id2word=dictionary)
coherence_values = {}
for n, topic in lm.show_topics(num_topics=-1, formatted=False):
topic = [word for word, _ in topic]
cm = CoherenceModel(topics=[topic], texts=train_texts, dictionary=dictionary, window_size=10)
coherence_values[n] = cm.get_coherence()
top_topics = sorted(coherence_values.items(), key=operator.itemgetter(1), reverse=True)
return lm, top_topics
lm, top_topics = ret_top_model()
print(top_topics[:5])
pprint([lm.show_topic(topicid) for topicid, c_v in top_topics[:10]])
lda_lsi_topics = [[word for word, prob in lm.show_topic(topicid)] for topicid, c_v in top_topics]
lsitopics = [[word for word, prob in topic] for topicid, topic in lsitopics]
hdptopics = [[word for word, prob in topic] for topicid, topic in hdptopics]
ldatopics = [[word for word, prob in topic] for topicid, topic in ldatopics]
lmtopics = [[word for word, prob in topic] for topicid, topic in lmtopics]
lsi_coherence = CoherenceModel(topics=lsitopics[:10], texts=train_texts, dictionary=dictionary, window_size=10).get_coherence()
hdp_coherence = CoherenceModel(topics=hdptopics[:10], texts=train_texts, dictionary=dictionary, window_size=10).get_coherence()
lda_coherence = CoherenceModel(topics=ldatopics, texts=train_texts, dictionary=dictionary, window_size=10).get_coherence()
lm_coherence = CoherenceModel(topics=lmtopics, texts=train_texts, dictionary=dictionary, window_size=10).get_coherence()
lda_lsi_coherence = CoherenceModel(topics=lda_lsi_topics[:10], texts=train_texts, dictionary=dictionary, window_size=10).get_coherence()
def evaluate_bar_graph(coherences, indices):
    """Function to plot bar graph.
    coherences: list of coherence values
    indices: Indices to be used to mark bars. Length of this and coherences should be equal.
    """
assert len(coherences) == len(indices)
n = len(coherences)
x = np.arange(n)
plt.bar(x, coherences, width=0.2, tick_label=indices, align='center')
plt.xlabel('Models')
plt.ylabel('Coherence Value')
evaluate_bar_graph([lsi_coherence, hdp_coherence, lda_coherence, lm_coherence, lda_lsi_coherence],
['LSI', 'HDP', 'LDA', 'LDA_Mod', 'LDA_LSI'])
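# For reference alongside the bar chart, the raw coherence values:
for name, coherence in zip(['LSI', 'HDP', 'LDA', 'LDA_Mod', 'LDA_LSI'],
                           [lsi_coherence, hdp_coherence, lda_coherence,
                            lm_coherence, lda_lsi_coherence]):
    print('{}: {:.4f}'.format(name, coherence))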
from gensim.topic_coherence import (segmentation, probability_estimation,
direct_confirmation_measure, indirect_confirmation_measure,
aggregation)
from gensim.matutils import argsort
from collections import namedtuple
make_pipeline = namedtuple('Coherence_Measure', 'seg, prob, conf, aggr')
measure = make_pipeline(segmentation.s_one_one,
probability_estimation.p_boolean_sliding_window,
direct_confirmation_measure.log_ratio_measure,
aggregation.arithmetic_mean)
topics = []
for topic in lm.state.get_lambda():
bestn = argsort(topic, topn=10, reverse=True)
topics.append(bestn)
# Perform segmentation
segmented_topics = measure.seg(topics)
# Since this is a window-based coherence measure we will perform window based prob estimation
per_topic_postings, num_windows = measure.prob(texts=train_texts, segmented_topics=segmented_topics,
dictionary=dictionary, window_size=2)
confirmed_measures = measure.conf(segmented_topics, per_topic_postings, num_windows, normalize=False)
print(measure.aggr(confirmed_measures))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The CMU pronouncing dictionary consists of sounds/words and their corresponding phonetic description (American pronunciation).
Step2: Next we're going to get a list of the unique phonemes in our vocabulary, as well as add a null "_" for zero-padding.
Step3: Then we create mappings of phonemes and letters to respective indices.
Step4: Let's create a dictionary mapping words to the sequence of indices corresponding to its phonemes, and let's do it only for words between 5 and 15 characters long.
Step5: Aside on various approaches to Python's list comprehension
Step6: Split lines into words and phonemes, convert to indexes (with padding), and split into training and test sets. Note we also find the max phoneme sequence length for padding.
Step7: Sklearn's train_test_split is an easy way to split data into training and testing sets.
Step8: Next we proceed to build our model.
Step9: Without attention
Step10: The model has three parts
Step11: We can refer to the parts of the model before and after get_rnn(False) returns a vector as the encoder and decoder. The encoder has taken a sequence of embeddings and encoded it into a numerical vector that completely describes its input, while the decoder transforms that vector into a new sequence.
Step12: To evaluate, we don't want to know what percentage of letters are correct but what percentage of words are.
Step13: The accuracy isn't great.
Step14: We can see that sometimes the mistakes are completely reasonable; occasionally they're totally off. This tends to happen with the longer words that have large phoneme sequences.
Step15: Attention model
Step16: The attentional model doesn't encode into a single vector, but rather a sequence of vectors. The decoder then attends over this sequence at every step. For example, after the bi-directional RNN we have 16 vectors corresponding to each phoneme's output state. Each output state describes how each phoneme relates to the phonemes before and after it. After going through more RNNs, our goal is to transform this sequence into a sequence of length 15 so we can classify into characters.
Step17: We can now train, passing in the decoder inputs as well for teacher forcing.
Step18: Better accuracy!
Step19: This model is certainly performing better with longer words. The mistakes it's making are reasonable, and it even successfully formed the word "partisanship".
Step20: Test code for the attention layer
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import importlib
import utils2; importlib.reload(utils2)
from utils2 import *
np.set_printoptions(4)
PATH = 'data/spellbee/'
limit_mem()
from sklearn.model_selection import train_test_split
lines = [l.strip().split(" ") for l in open(PATH+"cmudict-0.7b", encoding='latin1')
if re.match('^[A-Z]', l)]
lines = [(w, ps.split()) for w, ps in lines]
lines[0], lines[-1]
phonemes = ["_"] + sorted(set(p for w, ps in lines for p in ps))
phonemes[:5]
len(phonemes)
p2i = dict((v, k) for k,v in enumerate(phonemes))
letters = "_abcdefghijklmnopqrstuvwxyz*"
l2i = dict((v, k) for k,v in enumerate(letters))
maxlen=15
pronounce_dict = {w.lower(): [p2i[p] for p in ps] for w, ps in lines
if (5<=len(w)<=maxlen) and re.match("^[A-Z]+$", w)}
len(pronounce_dict)
a=['xyz','abc']
[o.upper() for o in a if o[0]=='x'], [[p for p in o] for o in a], [p for o in a for p in o]
maxlen_p = max([len(v) for k,v in pronounce_dict.items()])
maxlen_p
pairs = np.random.permutation(list(pronounce_dict.keys()))
n = len(pairs)
input_ = np.zeros((n, maxlen_p), np.int32)
labels_ = np.zeros((n, maxlen), np.int32)
for i, k in enumerate(pairs):
for j, p in enumerate(pronounce_dict[k]): input_[i][j] = p
for j, letter in enumerate(k): labels_[i][j] = l2i[letter]
go_token = l2i["*"]
dec_input_ = np.concatenate([np.ones((n,1)) * go_token, labels_[:,:-1]], axis=1)
(input_train, input_test, labels_train, labels_test, dec_input_train, dec_input_test
) = train_test_split(input_, labels_, dec_input_, test_size=0.1)
labels_train.shape
input_vocab_size, output_vocab_size = len(phonemes), len(letters)
input_vocab_size, output_vocab_size
parms = {'verbose': 0, 'callbacks': [TQDMNotebookCallback(leave_inner=True)]}
lstm_params = {}
dim = 240
def get_rnn(return_sequences= True):
return LSTM(dim, dropout=0.1, recurrent_dropout=0.1,
implementation=1, return_sequences=return_sequences)
inp = Input((maxlen_p,))
x = Embedding(input_vocab_size, 120)(inp)
x = Bidirectional(get_rnn())(x)
x = get_rnn(False)(x)
x = RepeatVector(maxlen)(x)
x = get_rnn()(x)
x = get_rnn()(x)
x = TimeDistributed(Dense(output_vocab_size, activation='softmax'))(x)
model = Model(inp, x)
model.compile(Adam(), 'sparse_categorical_crossentropy', metrics=['acc'])
hist=model.fit(input_train, np.expand_dims(labels_train,-1),
validation_data=[input_test, np.expand_dims(labels_test,-1)],
batch_size=64, **parms, epochs=3)
hist.history['val_loss']
def eval_keras(input):
preds = model.predict(input, batch_size=128)
predict = np.argmax(preds, axis = 2)
return (np.mean([all(real==p) for real, p in zip(labels_test, predict)]), predict)
acc, preds = eval_keras(input_test); acc
def print_examples(preds):
print("pronunciation".ljust(40), "real spelling".ljust(17),
"model spelling".ljust(17), "is correct")
for index in range(20):
ps = "-".join([phonemes[p] for p in input_test[index]])
real = [letters[l] for l in labels_test[index]]
predict = [letters[l] for l in preds[index]]
print(ps.split("-_")[0].ljust(40), "".join(real).split("_")[0].ljust(17),
"".join(predict).split("_")[0].ljust(17), str(real == predict))
print_examples(preds)
import attention_wrapper; importlib.reload(attention_wrapper)
from attention_wrapper import Attention
input_train.shape, dec_input_train.shape
inp = Input((maxlen_p,))
inp_dec = Input((maxlen,))
emb_dec = Embedding(output_vocab_size, 120)(inp_dec)
emb_dec = Dense(dim)(emb_dec)
x = Embedding(input_vocab_size, 120)(inp)
x = Bidirectional(get_rnn())(x)
x = get_rnn()(x)
x = get_rnn()(x)
x = Attention(get_rnn, 3)([x, emb_dec])
x = TimeDistributed(Dense(output_vocab_size, activation='softmax'))(x)
model = Model([inp, inp_dec], x)
model.compile(Adam(), 'sparse_categorical_crossentropy', metrics=['acc'])
hist=model.fit([input_train, dec_input_train], np.expand_dims(labels_train,-1),
validation_data=[[input_test, dec_input_test], np.expand_dims(labels_test,-1)],
batch_size=64, **parms, epochs=3) # Keras 2
hist.history['val_loss']
K.set_value(model.optimizer.lr, 1e-4)
hist=model.fit([input_train, dec_input_train], np.expand_dims(labels_train,-1),
validation_data=[[input_test, dec_input_test], np.expand_dims(labels_test,-1)],
batch_size=64, **parms, epochs=5) # Keras 2
np.array(hist.history['val_loss'])
def eval_keras():
preds = model.predict([input_test, dec_input_test], batch_size=128)
predict = np.argmax(preds, axis = 2)
return (np.mean([all(real==p) for real, p in zip(labels_test, predict)]), predict)
acc, preds = eval_keras(); acc
print("pronunciation".ljust(40), "real spelling".ljust(17),
"model spelling".ljust(17), "is correct")
for index in range(20):
ps = "-".join([phonemes[p] for p in input_test[index]])
real = [letters[l] for l in labels_test[index]]
predict = [letters[l] for l in preds[index]]
print (ps.split("-_")[0].ljust(40), "".join(real).split("_")[0].ljust(17),
"".join(predict).split("_")[0].ljust(17), str(real == predict))
nb_samples, nb_time, input_dim, output_dim = (64, 4, 32, 48)
x = tf.placeholder(np.float32, (nb_samples, nb_time, input_dim))
xr = K.reshape(x,(-1,nb_time,1,input_dim))
W1 = tf.placeholder(np.float32, (input_dim, input_dim)); W1.shape
W1r = K.reshape(W1, (1, input_dim, input_dim))
W1r2 = K.reshape(W1, (1, 1, input_dim, input_dim))
xW1 = K.conv1d(x,W1r,padding='same'); xW1.shape # Keras 2
xW12 = K.conv2d(xr,W1r2,padding='same'); xW12.shape # Keras 2
xW2 = K.dot(x, W1)
x1 = np.random.normal(size=(nb_samples, nb_time, input_dim))
w1 = np.random.normal(size=(input_dim, input_dim))
init = tf.global_variables_initializer() # added so ops can be run in the TF session below
sess = tf.InteractiveSession()
res = sess.run(xW1, {x:x1, W1:w1})
res2 = sess.run(xW2, {x:x1, W1:w1})
np.allclose(res, res2)
W2 = tf.placeholder(np.float32, (output_dim, input_dim)); W2.shape
h = tf.placeholder(np.float32, (nb_samples, output_dim))
hW2 = K.dot(h,W2); hW2.shape
hW2 = K.reshape(hW2,(-1,1,1,input_dim)); hW2.shape
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Initialise Single-Channel Record from dcpyps. Note that SCRecord takes a list of file names; several SCN files from the same patch can be loaded.
Step3: Load demo mechanism (C&H82 numerical example)
Step4: Check data histograms and probability densities calculated from initial guesses
Step5: Prepare likelihood function
Step6: Run optimisation
Step7: Plot experimental histograms and predicted pdfs
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import sys, time, math
import numpy as np
from numpy import linalg as nplin
from dcpyps.samples import samples
from dcpyps import dataset, mechanism, dcplots, dcio
# LOAD DATA: Burzomato 2004 example set.
scnfiles = [["./samples/glydemo/A-10.scn"],
["./samples/glydemo/B-30.scn"],
["./samples/glydemo/C-100.scn"],
["./samples/glydemo/D-1000.scn"]]
tr = [0.000030, 0.000030, 0.000030, 0.000030]
tc = [0.004, -1, -0.06, -0.02]
conc = [10e-6, 30e-6, 100e-6, 1000e-6]
# Initaialise SCRecord instance.
recs = []
bursts = []
for i in range(len(scnfiles)):
rec = dataset.SCRecord(scnfiles[i], conc[i], tr[i], tc[i])
recs.append(rec)
bursts.append(rec.bursts.intervals())
rec.printout()
# LOAD FLIP MECHANISM USED in Burzomato et al 2004
mecfn = "./samples/mec/demomec.mec"
version, meclist, max_mecnum = dcio.mec_get_list(mecfn)
mec = dcio.mec_load(mecfn, meclist[2][0])
# PREPARE RATE CONSTANTS.
# Fixed rates.
#fixed = np.array([False, False, False, False, False, False, False, True,
# False, False, False, False, False, False])
for i in range(len(mec.Rates)):
mec.Rates[i].fixed = False
# Constrained rates.
mec.Rates[21].is_constrained = True
mec.Rates[21].constrain_func = mechanism.constrain_rate_multiple
mec.Rates[21].constrain_args = [17, 3]
mec.Rates[19].is_constrained = True
mec.Rates[19].constrain_func = mechanism.constrain_rate_multiple
mec.Rates[19].constrain_args = [17, 2]
mec.Rates[16].is_constrained = True
mec.Rates[16].constrain_func = mechanism.constrain_rate_multiple
mec.Rates[16].constrain_args = [20, 3]
mec.Rates[18].is_constrained = True
mec.Rates[18].constrain_func = mechanism.constrain_rate_multiple
mec.Rates[18].constrain_args = [20, 2]
mec.Rates[8].is_constrained = True
mec.Rates[8].constrain_func = mechanism.constrain_rate_multiple
mec.Rates[8].constrain_args = [12, 1.5]
mec.Rates[13].is_constrained = True
mec.Rates[13].constrain_func = mechanism.constrain_rate_multiple
mec.Rates[13].constrain_args = [9, 2]
mec.update_constrains()
# Rates constrained by microscopic reversibility
mec.set_mr(True, 7, 0)
mec.set_mr(True, 14, 1)
# Update constrains
mec.update_constrains()
#Propose initial guesses different from recorded ones
initial_guesses = [5000.0, 500.0, 2700.0, 2000.0, 800.0, 15000.0, 300.0, 120000, 6000.0,
0.45E+09, 1500.0, 12000.0, 4000.0, 0.9E+09, 7500.0, 1200.0, 3000.0,
0.45E+07, 2000.0, 0.9E+07, 1000, 0.135E+08]
#initial_guesses = [3687.69, 6091.43, 2467.35, 32621.5, 7061.15, 129984., 1050.69, 20984., 3387.64,
# 0.166224E+09, 20783.8, 6308.02, 2258.42, 0.332447E+09, 31335.4, 144.530, 831.686,
# 0.620171E+06, 554.457, 0.124034E+07, 277.229, 0.186051E+07]
#initial_guesses = mec.unit_rates()
mec.set_rateconstants(initial_guesses)
mec.update_constrains()
mec.printout()
# Scale for ideal pdf
def scalefac(tres, matrix, phiA):
eigs, M = eig(-matrix)
N = inv(M)
k = N.shape[0]
A, w = np.zeros((k, k, k)), np.zeros(k)
for i in range(k):
A[i] = np.dot(M[:, i].reshape(k, 1), N[i].reshape(1, k))
for i in range(k):
w[i] = np.dot(np.dot(np.dot(phiA, A[i]), (-matrix)), np.ones((k, 1)))
return 1 / np.sum((w / eigs) * np.exp(-tres * eigs))
from dcprogs.likelihood import QMatrix
from dcprogs.likelihood import missed_events_pdf, ideal_pdf, IdealG, eig, inv
fig, axes = plt.subplots(len(recs), 2, figsize=(12,15))
for i in range(len(recs)):
mec.set_eff('c', recs[i].conc)
qmatrix = QMatrix(mec.Q, mec.kA)
idealG = IdealG(qmatrix)
# Plot apparent open period histogram
ipdf = ideal_pdf(qmatrix, shut=False)
iscale = scalefac(recs[i].tres, qmatrix.aa, idealG.initial_occupancies)
epdf = missed_events_pdf(qmatrix, recs[i].tres, nmax=2, shut=False)
dcplots.xlog_hist_HJC_fit(axes[i,0], recs[i].tres, recs[i].opint,
epdf, ipdf, iscale, shut=False)
    axes[i,0].set_title('concentration = {0:.3f} mM'.format(recs[i].conc*1000))
# Plot apparent shut period histogram
ipdf = ideal_pdf(qmatrix, shut=True)
iscale = scalefac(recs[i].tres, qmatrix.ff, idealG.final_occupancies)
epdf = missed_events_pdf(qmatrix, recs[i].tres, nmax=2, shut=True)
dcplots.xlog_hist_HJC_fit(axes[i,1], recs[i].tres, recs[i].shint,
epdf, ipdf, iscale, tcrit=math.fabs(recs[i].tcrit))
    axes[i,1].set_title('concentration = {0:.6f} mM'.format(recs[i].conc*1000))
fig.tight_layout()
def dcprogslik(x, lik, m, c):
m.theta_unsqueeze(np.exp(x))
l = 0
for i in range(len(c)):
m.set_eff('c', c[i])
l += lik[i](m.Q)
return -l * math.log(10)
def printiter(theta):
global iternum, likelihood, mec, conc
iternum += 1
if iternum % 100 == 0:
lik = dcprogslik(theta, likelihood, mec, conc)
print("iteration # {0:d}; log-lik = {1:.6f}".format(iternum, -lik))
print(np.exp(theta))
# Import HJCFIT likelihood function
from dcprogs.likelihood import Log10Likelihood
kwargs = {'nmax': 2, 'xtol': 1e-12, 'rtol': 1e-12, 'itermax': 100,
'lower_bound': -1e6, 'upper_bound': 0}
likelihood = []
for i in range(len(recs)):
likelihood.append(Log10Likelihood(bursts[i], mec.kA,
recs[i].tres, recs[i].tcrit, **kwargs))
# Extract free parameters
theta = mec.theta()
print ('\ntheta=', theta)
print('Number of free parameters = ', len(theta))
lik = dcprogslik(np.log(theta), likelihood, mec, conc)
print ("\nInitial likelihood = {0:.6f}".format(-lik))
from scipy.optimize import minimize
print ("\nScyPy.minimize (Nelder-Mead) Fitting started: " +
"%4d/%02d/%02d %02d:%02d:%02d"%time.localtime()[0:6])
iternum = 0
start = time.clock()
start_wall = time.time()
maxiter = 200
# maxiter = 30000
result = minimize(dcprogslik, np.log(theta), args=(likelihood, mec, conc), method='Nelder-Mead', callback=printiter,
options={'xtol':1e-5, 'ftol':1e-5, 'maxiter': maxiter, 'maxfev': 150000, 'disp': True})
t3 = time.clock() - start
t3_wall = time.time() - start_wall
print ("\nScyPy.minimize (Nelder-Mead) Fitting finished: " +
"%4d/%02d/%02d %02d:%02d:%02d"%time.localtime()[0:6])
print ('\nCPU time in ScyPy.minimize (Nelder-Mead)=', t3)
print ('Wall clock time in ScyPy.minimize (Nelder-Mead)=', t3_wall)
print ('\nResult ==========================================\n', result)
print ("\nFinal likelihood = {0:.16f}".format(-result.fun))
mec.theta_unsqueeze(np.exp(result.x))
print ("\nFinal rate constants:")
mec.printout()
fig, axes = plt.subplots(len(recs), 2, figsize=(12,15))
for i in range(len(recs)):
mec.set_eff('c', recs[i].conc)
qmatrix = QMatrix(mec.Q, mec.kA)
idealG = IdealG(qmatrix)
# Plot apparent open period histogram
ipdf = ideal_pdf(qmatrix, shut=False)
iscale = scalefac(recs[i].tres, qmatrix.aa, idealG.initial_occupancies)
epdf = missed_events_pdf(qmatrix, recs[i].tres, nmax=2, shut=False)
dcplots.xlog_hist_HJC_fit(axes[i,0], recs[i].tres, recs[i].opint,
epdf, ipdf, iscale, shut=False)
    axes[i,0].set_title('concentration = {0:.3f} mM'.format(conc[i]*1000))
# Plot apparent shut period histogram
ipdf = ideal_pdf(qmatrix, shut=True)
iscale = scalefac(recs[i].tres, qmatrix.ff, idealG.final_occupancies)
epdf = missed_events_pdf(qmatrix, recs[i].tres, nmax=2, shut=True)
dcplots.xlog_hist_HJC_fit(axes[i,1], recs[i].tres, recs[i].shint,
epdf, ipdf, iscale, tcrit=math.fabs(recs[i].tcrit))
    axes[i,1].set_title('concentration = {0:.6f} mM'.format(conc[i]*1000))
fig.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading and displaying the model graph
Step6: To take a glimpse into the kinds of patterns that the network learned to recognize, we will try to generate images that maximize the sum of activations of a particular channel of a particular convolutional layer of the neural network. The network we explore contains many convolutional layers, each of which outputs tens to hundreds of feature channels, so we have plenty of patterns to explore.
Step7: Naive feature visualization
Step8: Multiscale image generation
Step9: Laplacian pyramid gradient normalization
Step10: Playing with feature visualizations
Step11: Lower layers produce features of lower complexity.
Step12: There are many interesting things one may try. For example, optimizing a linear combination of features often gives a "mixture" pattern.
Step13: DeepDream
Step14: Let's load some image and populate it with DogSlugs (in case you've missed them).
Step15: Note that results can differ from the Caffe implementation, as we are using an independently trained network. Still, the network seems to like dogs and animal-like features due to the nature of the ImageNet dataset.
|
<ASSISTANT_TASK:>
Python Code:
# boilerplate code
import os
from io import BytesIO
import numpy as np
from functools import partial
import PIL.Image
from IPython.display import clear_output, Image, display, HTML
from __future__ import print_function
import tensorflow as tf
#!wget https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip && unzip inception5h.zip
model_fn = 'tensorflow_inception_graph.pb'
# creating TensorFlow session and loading the model
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
with tf.gfile.FastGFile(model_fn, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
t_input = tf.placeholder(np.float32, name='input') # define the input tensor
imagenet_mean = 117.0
t_preprocessed = tf.expand_dims(t_input-imagenet_mean, 0)
tf.import_graph_def(graph_def, {'input':t_preprocessed})
layers = [op.name for op in graph.get_operations() if op.type=='Conv2D' and 'import/' in op.name]
feature_nums = [int(graph.get_tensor_by_name(name+':0').get_shape()[-1]) for name in layers]
print('Number of layers', len(layers))
print('Total number of feature channels:', sum(feature_nums))
# Helper functions for TF Graph visualization
def strip_consts(graph_def, max_const_size=32):
    """Strip large constant values from graph_def."""
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = "<stripped %d bytes>"%size
return strip_def
def rename_nodes(graph_def, rename_func):
res_def = tf.GraphDef()
for n0 in graph_def.node:
n = res_def.node.add()
n.MergeFrom(n0)
n.name = rename_func(n.name)
for i, s in enumerate(n.input):
n.input[i] = rename_func(s) if s[0]!='^' else '^'+rename_func(s[1:])
return res_def
def show_graph(graph_def, max_const_size=32):
    """Visualize TensorFlow graph."""
if hasattr(graph_def, 'as_graph_def'):
graph_def = graph_def.as_graph_def()
strip_def = strip_consts(graph_def, max_const_size=max_const_size)
    code = """
        <script>
          function load() {{
            document.getElementById("{id}").pbtxt = {data};
          }}
        </script>
        <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
        <div style="height:600px">
          <tf-graph-basic id="{id}"></tf-graph-basic>
        </div>
    """.format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
    iframe = """
        <iframe seamless style="width:800px;height:620px;border:0" srcdoc="{}"></iframe>
    """.format(code.replace('"', '&quot;'))
display(HTML(iframe))
# Visualizing the network graph. Be sure expand the "mixed" nodes to see their
# internal structure. We are going to visualize "Conv2D" nodes.
tmp_def = rename_nodes(graph_def, lambda s:"/".join(s.split('_',1)))
show_graph(tmp_def)
# Picking some internal layer. Note that we use outputs before applying the ReLU nonlinearity
# to have non-zero gradients for features with negative initial activations.
layer = 'mixed4d_3x3_bottleneck_pre_relu'
channel = 139 # picking some feature channel to visualize
# start with a gray image with a little noise
img_noise = np.random.uniform(size=(224,224,3)) + 100.0
def showarray(a, fmt='jpeg'):
a = np.uint8(np.clip(a, 0, 1)*255)
f = BytesIO()
PIL.Image.fromarray(a).save(f, fmt)
display(Image(data=f.getvalue()))
def visstd(a, s=0.1):
'''Normalize the image range for visualization'''
return (a-a.mean())/max(a.std(), 1e-4)*s + 0.5
def T(layer):
'''Helper for getting layer output tensor'''
return graph.get_tensor_by_name("import/%s:0"%layer)
def render_naive(t_obj, img0=img_noise, iter_n=20, step=1.0):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
img = img0.copy()
for i in range(iter_n):
g, score = sess.run([t_grad, t_score], {t_input:img})
# normalizing the gradient, so the same step size should work
g /= g.std()+1e-8 # for different layers and networks
img += g*step
print(score, end = ' ')
clear_output()
showarray(visstd(img))
render_naive(T(layer)[:,:,:,channel])
def tffunc(*argtypes):
'''Helper that transforms TF-graph generating function into a regular one.
See "resize" function below.
'''
placeholders = list(map(tf.placeholder, argtypes))
def wrap(f):
out = f(*placeholders)
def wrapper(*args, **kw):
return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
return wrapper
return wrap
# Helper function that uses TF to resize an image
def resize(img, size):
img = tf.expand_dims(img, 0)
return tf.image.resize_bilinear(img, size)[0,:,:,:]
resize = tffunc(np.float32, np.int32)(resize)
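# Quick check of the TF-backed resize helper (this relies on the interactive
# session opened above, since the tffunc wrapper evaluates the graph eagerly):
demo_img = np.float32(np.random.uniform(size=(100, 100, 3)))
print(resize(demo_img, np.int32([50, 50])).shape)  # expect (50, 50, 3)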
def calc_grad_tiled(img, t_grad, tile_size=512):
'''Compute the value of tensor t_grad over the image in a tiled way.
Random shifts are applied to the image to blur tile boundaries over
multiple iterations.'''
sz = tile_size
h, w = img.shape[:2]
sx, sy = np.random.randint(sz, size=2)
img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
grad = np.zeros_like(img)
for y in range(0, max(h-sz//2, sz),sz):
for x in range(0, max(w-sz//2, sz),sz):
sub = img_shift[y:y+sz,x:x+sz]
g = sess.run(t_grad, {t_input:sub})
grad[y:y+sz,x:x+sz] = g
return np.roll(np.roll(grad, -sx, 1), -sy, 0)
def render_multiscale(t_obj, img0=img_noise, iter_n=10, step=1.0, octave_n=3, octave_scale=1.4):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
img = img0.copy()
for octave in range(octave_n):
if octave>0:
hw = np.float32(img.shape[:2])*octave_scale
img = resize(img, np.int32(hw))
for i in range(iter_n):
g = calc_grad_tiled(img, t_grad)
# normalizing the gradient, so the same step size should work
g /= g.std()+1e-8 # for different layers and networks
img += g*step
print('.', end = ' ')
clear_output()
showarray(visstd(img))
render_multiscale(T(layer)[:,:,:,channel])
k = np.float32([1,4,6,4,1])
k = np.outer(k, k)
k5x5 = k[:,:,None,None]/k.sum()*np.eye(3, dtype=np.float32)
def lap_split(img):
'''Split the image into lo and hi frequency components'''
with tf.name_scope('split'):
lo = tf.nn.conv2d(img, k5x5, [1,2,2,1], 'SAME')
lo2 = tf.nn.conv2d_transpose(lo, k5x5*4, tf.shape(img), [1,2,2,1])
hi = img-lo2
return lo, hi
def lap_split_n(img, n):
'''Build Laplacian pyramid with n splits'''
levels = []
for i in range(n):
img, hi = lap_split(img)
levels.append(hi)
levels.append(img)
return levels[::-1]
def lap_merge(levels):
'''Merge Laplacian pyramid'''
img = levels[0]
for hi in levels[1:]:
with tf.name_scope('merge'):
img = tf.nn.conv2d_transpose(img, k5x5*4, tf.shape(hi), [1,2,2,1]) + hi
return img
def normalize_std(img, eps=1e-10):
'''Normalize image by making its standard deviation = 1.0'''
with tf.name_scope('normalize'):
std = tf.sqrt(tf.reduce_mean(tf.square(img)))
return img/tf.maximum(std, eps)
def lap_normalize(img, scale_n=4):
'''Perform the Laplacian pyramid normalization.'''
img = tf.expand_dims(img,0)
tlevels = lap_split_n(img, scale_n)
tlevels = list(map(normalize_std, tlevels))
out = lap_merge(tlevels)
return out[0,:,:,:]
# Showing the lap_normalize graph with TensorBoard
lap_graph = tf.Graph()
with lap_graph.as_default():
lap_in = tf.placeholder(np.float32, name='lap_in')
lap_out = lap_normalize(lap_in)
show_graph(lap_graph)
def render_lapnorm(t_obj, img0=img_noise, visfunc=visstd,
iter_n=10, step=1.0, octave_n=3, octave_scale=1.4, lap_n=4):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
# build the laplacian normalization graph
lap_norm_func = tffunc(np.float32)(partial(lap_normalize, scale_n=lap_n))
img = img0.copy()
for octave in range(octave_n):
if octave>0:
hw = np.float32(img.shape[:2])*octave_scale
img = resize(img, np.int32(hw))
for i in range(iter_n):
g = calc_grad_tiled(img, t_grad)
g = lap_norm_func(g)
img += g*step
print('.', end = ' ')
clear_output()
showarray(visfunc(img))
render_lapnorm(T(layer)[:,:,:,channel])
render_lapnorm(T(layer)[:,:,:,65])
render_lapnorm(T('mixed3b_1x1_pre_relu')[:,:,:,101])
render_lapnorm(T(layer)[:,:,:,65]+T(layer)[:,:,:,139], octave_n=4)
def render_deepdream(t_obj, img0=img_noise,
iter_n=10, step=1.5, octave_n=4, octave_scale=1.4):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
# split the image into a number of octaves
img = img0
octaves = []
for i in range(octave_n-1):
hw = img.shape[:2]
lo = resize(img, np.int32(np.float32(hw)/octave_scale))
hi = img-resize(lo, hw)
img = lo
octaves.append(hi)
# generate details octave by octave
for octave in range(octave_n):
if octave>0:
hi = octaves[-octave]
img = resize(img, hi.shape[:2])+hi
for i in range(iter_n):
g = calc_grad_tiled(img, t_grad)
img += g*(step / (np.abs(g).mean()+1e-7))
print('.',end = ' ')
clear_output()
showarray(img/255.0)
img0 = PIL.Image.open('pilatus800.jpg')
img0 = np.float32(img0)
showarray(img0/255.0)
render_deepdream(tf.square(T('mixed4c')), img0)
render_deepdream(T(layer)[:,:,:,139], img0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Left singular vectors
Step2: Singular values
Step3: As you can see, the singular values are sorted in descending order.
Step4: Reconstructing the original matrix
Step5: We use all the dimensions to get back to the original matrix. As a result, we obtain a matrix that is almost identical. Let's calculate the difference between the two matrices.
Step6: Expectedly, the error is infinitesimal.
Step7: Again, the reconstructed matrix is very similar to the original one. And the total error is still small.
Step8: As can be seen above, the more ranks we use, the less error occurs. From another perspective, we get closer to the original data by increasing the rank.
|
<ASSISTANT_TASK:>
Python Code:
A = np.mat([
[4, 5, 4, 1, 1],
[5, 3, 5, 0, 0],
[0, 1, 0, 1, 1],
[0, 0, 0, 0, 1],
[1, 0, 0, 4, 5],
[0, 1, 0, 5, 4],
])
U, S, V = np.linalg.svd(A)
U.shape, S.shape, V.shape
U
S
np.diag(S)
V
def reconstruct(U, S, V, rank):
return U[:,0:rank] * np.diag(S[:rank]) * V[:rank]
r = len(S)
reconstruct(U, S, V, r)
def calcError(A, B):
return np.sum(np.power(A - B, 2))
calcError(A, reconstruct(U, S, V, r))
reconstruct(U, S, V, 3)
calcError(A, reconstruct(U, S, V, 3))
reconstruct(U, S, V, 2)
calcError(A, reconstruct(U, S, V, 2))
A = np.mat([
[4, 5, 4, 0, 4, 0, 0, 1, 0, 1, 2, 1],
[5, 3, 5, 5, 0, 1, 0, 0, 2, 0, 0, 2],
[0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1],
[1, 0, 1, 0, 1, 5, 0, 0, 4, 5, 4, 0],
[0, 1, 1, 0, 0, 4, 3, 5, 5, 3, 4, 0],
])
# Recompute the SVD for the new, larger ratings matrix before reconstructing.
U, S, V = np.linalg.svd(A)

def reconstruct(U, S, V, rank):
    return U[:,0:rank] * np.diag(S[:rank]) * V[:rank]
for rank in range(1, len(S)):
rA = reconstruct(U, S, V, rank)
error = calcError(A, rA)
coverage = S[:rank].sum() / S.sum()
print("with rank {}, coverage: {:.4f}, error: {:.4f}".format(rank, coverage, error))
print("Original:\n", A[:,10])
print("Reconstructed:\n", reconstruct(U, S, V, 4)[:,10])
imread("data/pacman.png", flatten=True).shape
A = np.mat(imread("data/pacman.png", flatten=True))
U, S, V = np.linalg.svd(A)
A.shape, U.shape, S.shape, V.shape
for rank in range(1, len(S)):
rA = reconstruct(U, S, V, rank)
error = calcError(A, rA)
coverage = S[:rank].sum() / S.sum()
print("with rank {}, coverage: {:.4f}, error: {:.4f}".format(rank, coverage, error))
for i in range(1, 50, 5):
rA = reconstruct(U, S, V, i)
print(rA.shape)
plt.imshow(rA, cmap='gray')
plt.show()
plt.imshow(rA, interpolation='nearest')
# Storage comparison for a 128x128 image: the full image stores 128*128
# values, while a rank-10 approximation stores roughly 10 column/row
# vectors of length 128 each (plus the singular values).
print(128 * 128 - (10 * 128 * 2))
from PIL import Image
A = np.mat(imread("data/noise.png", flatten=True))
img = Image.open('data/noise.png')
imggray = img.convert('LA')
imgmat = np.array(list(imggray.getdata(band=0)), float)
imgmat.shape = (imggray.size[1], imggray.size[0])
imgmat = np.matrix(imgmat)
plt.figure(figsize=(9,6))
plt.imshow(imgmat, cmap='gray');
plt.show()
U, S, V = np.linalg.svd(imgmat)
for i in range(1, 10, 1):
rA = reconstruct(U, S, V, i)
print(rA.shape)
plt.imshow(rA, cmap='gray');
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Modeling tree growth
Step2: And here's the series of heights for a site with index 45, indicating that height at 30 years is 45 feet.
Step3: Here's the series for site index 65.
Step4: And for site index 85.
Step5: Here's what the curves look like
Step6: For my examples I'll work with the SI 65 data; as an exercise, you can run the notebook again with either of the other curves.
Step7: Model 1
Step9: And here's an update function that takes the current height as a parameter and returns the height during the next time step.
Step10: I'll test the update function with the initial conditions.
Step12: Here's our usual version of run_simulation.
Step13: And here's how we run it.
Step14: Here's what the results look like
Step15: The model converges to a straight line.
Step16: And here's the mean absolute error.
Step17: This model might explain why the height of a tree grows roughly linearly
Step19: I'll wrap the code from the previous section in a function that takes the parameters as inputs and makes a System object.
Step20: Here's how we use it.
Step21: With different values for the parameters, we get curves with different behavior. Here are a few that I chose by hand.
Step23: To find the parameters that best fit the data, I'll use leastsq.
Step24: Here's how we use it
Step25: Now we can pass error_func to leastsq, which finds the parameters that minimize the squares of the errors.
Step26: Using the best parameters we found, we can run the model and plot the results.
Step27: The mean absolute error is better than for Model 1, but that doesn't mean much. The model still doesn't fit the data well.
Step28: And the estimated fractal dimension is 3.11, which doesn't seem likely.
Step30: Here's an updated version of make_system
Step31: Here's the new System object.
Step33: And here's the new update function.
Step34: As always, we'll test the update function with the initial conditions.
Step35: And we'll test the error function with the new update function.
Step36: Now let's search for the best parameters.
Step37: With these parameters, we can fit the data much better.
Step38: And the mean absolute error is substantially smaller.
Step39: The estimated fractal dimension is about 2.6, which is plausible; it suggests that if you double the height of the tree, the mass grows by a factor of $2^{2.6}$
|
<ASSISTANT_TASK:>
Python Code:
# install Pint if necessary
try:
import pint
except ImportError:
!pip install pint
# download modsim.py if necessary
from os.path import exists
filename = 'modsim.py'
if not exists(filename):
from urllib.request import urlretrieve
url = 'https://raw.githubusercontent.com/AllenDowney/ModSim/main/'
local, _ = urlretrieve(url+filename, filename)
print('Downloaded ' + local)
# import functions from modsim
from modsim import *
years = [2, 3, 4, 5, 6, 8, 10, 15, 20, 25, 30,
35, 40, 45, 50, 55, 60, 65, 70]
site45 = TimeSeries([1.4, 1.49, 1.75, 2.18, 2.78, 4.45, 6.74,
14.86, 25.39, 35.60, 45.00, 53.65, 61.60,
68.92, 75.66, 81.85, 87.56, 92.8, 97.63],
index=years)
site65 = TimeSeries([1.4, 1.56, 2.01, 2.76, 3.79, 6.64, 10.44,
23.26, 37.65, 51.66, 65.00, 77.50, 89.07,
99.66, 109.28, 117.96, 125.74, 132.68, 138.84],
index=years)
site85 = TimeSeries([1.4, 1.8, 2.71, 4.09, 5.92, 10.73, 16.81,
34.03, 51.26, 68.54, 85, 100.34, 114.33,
126.91, 138.06, 147.86, 156.39, 163.76, 170.10],
index=years)
site85.plot(label='SI 85')
site65.plot(label='SI 65')
site45.plot(label='SI 45')
decorate(xlabel='Time (years)',
ylabel='Height (feet)')
data = site65
alpha = 7
dim = 3
t_0 = data.index[0]
h_0 = data[t_0]
t_end = data.index[-1]
system = System(alpha=alpha,
dim=dim,
h_0=h_0,
t_0=t_0,
t_end=t_end)
def update(height, t, system):
    """Update height based on geometric model.
    height: current height in feet
    t: what year it is
    system: system object with model parameters
    """
area = height**2
mass = height**system.dim
mass += system.alpha * area
return mass**(1/system.dim)
update(h_0, t_0, system)
def run_simulation(system, update_func):
    """Simulate the system using any update function.
    system: System object
    update_func: function that computes the height next year
    returns: TimeSeries
    """
results = TimeSeries()
results[system.t_0] = system.h_0
for t in linrange(system.t_0, system.t_end-1):
results[t+1] = update_func(results[t], t, system)
return results
results = run_simulation(system, update)
results.tail()
def plot_results(results, data):
results.plot(style=':', label='model', color='gray')
data.plot(label='data')
decorate(xlabel='Time (years)',
ylabel='Height (feet)')
plot_results(results, data)
errors = results - data
errors.dropna()
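# As noted above, the model converges to a straight line: the year-over-year
# height increments settle toward a constant value.
results.diff().dropna().tail()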
def mean_abs_error(results, data):
return (results-data).abs().mean()
mean_abs_error(results, data)
alpha = 7
dim = 2.5
def make_system(params, data):
    """Makes a System object.
    params: sequence of alpha, dim
    data: Series
    returns: System object
    """
alpha, dim = params
t_0 = data.index[0]
t_end = data.index[-1]
h_0 = data[t_0]
return System(alpha=alpha, dim=dim,
h_0=h_0, t_0=t_0, t_end=t_end)
params = alpha, dim
system = make_system(params, data)
def run_and_plot(alpha, dim, data):
params = alpha, dim
system = make_system(params, data)
results = run_simulation(system, update)
results.plot(style=':', color='gray', label='_nolegend')
run_and_plot(0.145, 2, data)
run_and_plot(0.58, 2.4, data)
run_and_plot(2.8, 2.8, data)
run_and_plot(6.6, 3, data)
run_and_plot(15.5, 3.2, data)
run_and_plot(38, 3.4, data)
data.plot(label='data')
decorate(xlabel='Time (years)',
ylabel='Height (feet)')
def error_func(params, data, update_func):
    """Runs the model and returns errors.
    params: sequence of alpha, dim
    data: Series
    update_func: function object
    returns: Series of errors
    """
print(params)
system = make_system(params, data)
results = run_simulation(system, update_func)
return (results - data).dropna()
errors = error_func(params, data, update)
best_params, details = leastsq(error_func, params, data, update)
print(details.success)
system = make_system(best_params, data)
results = run_simulation(system, update)
plot_results(results, data)
mean_abs_error(results, data)
alpha = 2.0
dim = 2.5
K = 150
params = [alpha, dim, K]
def make_system(params, data):
"""Makes a System object.
params: sequence of alpha, dim, K
data: Series
returns: System object
"""
alpha, dim, K = params
t_0 = data.index[0]
t_end = data.index[-1]
h_0 = data[t_0]
return System(alpha=alpha, dim=dim, K=K,
h_0=h_0, t_0=t_0, t_end=t_end)
system = make_system(params, data)
def update3(height, t, system):
"""Update height based on geometric model with growth limiting term.
height: current height in feet
t: what year it is
system: system object with model parameters
"""
area = height**2
mass = height**system.dim
mass += system.alpha * area * (1 - height/system.K)
return mass**(1/system.dim)
update3(h_0, t_0, system)
error_func(params, data, update3)
best_params, details = leastsq(error_func, params, data, update3)
details.success
system = make_system(best_params, data)
results = run_simulation(system, update3)
plot_results(results, data)
mean_abs_error(results, data)
2**2.6
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: or,
Step2: People who want to use an explicit version
Step3: or,
Step4: or even, (less preferred)
Step5: The last option makes it easy to update to a new version in one place per file, once you've made the deliberate action to do this.
Step6: or,
Step7: or (less preferred)
Step9: How parsing works
Step10: In the example above, if a 2.1 or higher object is parsed, the operation will fail.
|
<ASSISTANT_TASK:>
Python Code:
import stix2
stix2.Indicator()
from stix2 import Indicator
Indicator()
import stix2.v20
stix2.v20.Indicator()
from stix2.v20 import Indicator
Indicator()
import stix2.v20 as stix2
stix2.Indicator()
import stix2
stix2.v20.Indicator()
stix2.v21.Indicator()
from stix2 import v20, v21
v20.Indicator()
v21.Indicator()
from stix2.v20 import Indicator as Indicator_v20
from stix2.v21 import Indicator as Indicator_v21
Indicator_v20()
Indicator_v21()
from stix2 import parse
indicator = parse({
"type": "indicator",
"id": "indicator--dbcbd659-c927-4f9a-994f-0a2632274394",
"created": "2017-09-26T23:33:39.829Z",
"modified": "2017-09-26T23:33:39.829Z",
"labels": [
"malicious-activity"
],
"name": "File hash for malware variant",
"pattern": "[file:hashes.md5 = 'd41d8cd98f00b204e9800998ecf8427e']",
"valid_from": "2017-09-26T23:33:39.829952Z"
}, version="2.0")
print(indicator.serialize(pretty=True))
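# Quick sanity check (editorial sketch): parse() with version="2.0" should
# hand back a version-specific class, not a plain dict.
import stix2
print(type(indicator))
print(isinstance(indicator, stix2.v20.Indicator))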
import stix2
# Make my custom observable available in STIX 2.0
@stix2.v20.CustomObservable('x-new-object-type',
[("prop", stix2.properties.BooleanProperty())])
class NewObject2(object):
pass
# Make my custom observable available in STIX 2.1
@stix2.v21.CustomObservable('x-new-object-type',
[("prop", stix2.properties.BooleanProperty())])
class NewObject2(object):
pass
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's imagine we measure 2 quantities, $x_1$ and $x_2$ for some objects, and we know the classes that these objects belong to, e.g., "star", 0, or "galaxy", 1 (maybe we classified these objects by hand, or knew through some other means). We now observe ($x_1$, $x_2$) for some new object and want to know whether it belongs in class 0 or 1.
Step2: We now observe a new point, and would like to know which class it belongs to
Step3: KNN works by predicting the class of a new point based on the classes of the K training data points closest to the new point. The two things that can be customized about this method are K, the number of points to use, and the distance metric used to compute the distances between the new point and the training data. If the dimensions in your data are measured with different units or with very different measurement uncertainties, you might need to be careful with the way you choose this metric. For simplicity, we'll start by fixing K=16 and use a Euclidean distance to see how this works in practice
Step4: All of the closest points are from class 1, so we would classify the new point as class=1. If there is a mixture of possible classes, take the class with more neighbors. If it's a tie, choose a class at random. That's it! Let's see how to use the KNN classifier in scikit-learn
Step5: Let's visualize the decision boundary of this classifier by evaluating the predicted class for a grid of trial data
Step6: KNN is very simple, but is very fast and is therefore useful in problems with large or wide datasets.
Step7: What does the decision boundary look like in this case, as a function of the number of neighbors, K
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('notebook.mplstyle')
%matplotlib inline
from scipy.stats import mode
a = np.random.multivariate_normal([1., 0.5],
[[4., 0.],
[0., 0.25]], size=512)
b = np.random.multivariate_normal([10., 8.],
[[1., 0.],
[0., 25]], size=1024)
X = np.vstack((a,b))
y = np.concatenate((np.zeros(len(a)),
np.ones(len(b))))
X.shape, y.shape
plt.figure(figsize=(6,6))
plt.scatter(X[:,0], X[:,1], c=y, cmap='RdBu', marker='.', alpha=0.4)
plt.xlim(-10, 20)
plt.ylim(-10, 20)
plt.title('Training data')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.tight_layout()
np.random.seed(42)
new_pt = np.random.uniform(-10, 20, size=2)
plt.figure(figsize=(6,6))
plt.scatter(X[:,0], X[:,1], c=y, cmap='RdBu', marker='.', alpha=0.5, linewidth=0)
plt.scatter(new_pt[0], new_pt[1], marker='+', color='g', s=100, linewidth=3)
plt.xlim(-10, 20)
plt.ylim(-10, 20)
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.tight_layout()
K = 16
def distance(pts1, pts2):
pts1 = np.atleast_2d(pts1)
pts2 = np.atleast_2d(pts2)
return np.sqrt( (pts1[:,0]-pts2[:,0])**2 + (pts1[:,1]-pts2[:,1])**2)
# compute the distance between all training data points and the new point
dists = distance(X, new_pt)
# get the classes (from the training data) of the K nearest points
nearest_classes = y[np.argsort(dists)[:K]]
nearest_classes
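# A minimal majority vote over the K nearest labels (editorial sketch):
# np.bincount counts how often each class index occurs and argmax picks the
# most frequent one. Here all 16 neighbours belong to class 1.
votes = np.bincount(nearest_classes.astype(int))
print(votes.argmax())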
from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier(n_neighbors=16)
clf.fit(X, y)
clf.predict(new_pt.reshape(1, -1)) # input has to be 2D
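# The class "probabilities" reported by KNN are simply the fractions of the
# K neighbours falling in each class (editorial sketch):
clf.predict_proba(new_pt.reshape(1, -1))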
grid_1d = np.linspace(-10, 20, 256)
grid_x1, grid_x2 = np.meshgrid(grid_1d, grid_1d)
grid = np.stack((grid_x1.ravel(), grid_x2.ravel()), axis=1)
y_grid = clf.predict(grid)
plt.figure(figsize=(6,6))
plt.pcolormesh(grid_x1, grid_x2, y_grid.reshape(grid_x1.shape),
cmap='Set3', alpha=1.)
plt.scatter(X[:,0], X[:,1], marker='.', alpha=0.65, linewidth=0)
plt.xlim(-10, 20)
plt.ylim(-10, 20)
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.tight_layout()
a = np.random.multivariate_normal([6., 0.5],
[[8., 0.],
[0., 0.25]], size=512)
b = np.random.multivariate_normal([10., 4.],
[[2., 0.],
[0., 8]], size=1024)
X2 = np.vstack((a,b))
y2 = np.concatenate((np.zeros(len(a)),
np.ones(len(b))))
plt.figure(figsize=(6,6))
plt.scatter(X2[:,0], X2[:,1], c=y2, cmap='RdBu', marker='.', alpha=0.4)
plt.xlim(-10, 20)
plt.ylim(-10, 20)
plt.title('Training data')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.tight_layout()
for K in [4, 16, 64, 256]:
clf2 = KNeighborsClassifier(n_neighbors=K)
clf2.fit(X2, y2)
y_grid2 = clf2.predict(grid)
plt.figure(figsize=(6,6))
plt.pcolormesh(grid_x1, grid_x2, y_grid2.reshape(grid_x1.shape),
cmap='Set3', alpha=1.)
plt.scatter(X2[:,0], X2[:,1], marker='.', alpha=0.65, linewidth=0)
plt.xlim(-10, 20)
plt.ylim(-10, 20)
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.title("$K={0}$".format(K))
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Experimental Results from a Decision Tree based NER model
Step3: Rather lackluster performance.
Step4: It appears adding in the morphological features greatly increased classifier performance.
|
<ASSISTANT_TASK:>
Python Code:
import os
import subprocess
"""Creates models for each fold and runs evaluation with results."""
featureset = "o"
entity_name = "adversereaction"
for fold in range(1,1): #training has already been done
training_data = "../ARFF_Files/%s_ARFF/_%s/_train/%s_train-%i.arff" % (entity_name, featureset, entity_name, fold)
os.system("python3 decisiontree.py -tr %s" % (training_data))
for fold in range(1,11):
testing_data = "../ARFF_Files/%s_ARFF/_%s/_test/%s_test-%i.arff" % (entity_name, featureset, entity_name, fold)
output = subprocess.check_output("python3 evaluate_decisiontree.py -te %s" % (testing_data), shell=True)
print(output.decode('utf-8'))
import os
import subprocess
"""Creates models for each fold and runs evaluation with results."""
featureset = "om"
entity_name = "adversereaction"
for fold in range(1,1): #training has already been done
training_data = "../ARFF_Files/%s_ARFF/_%s/_train/%s_train-%i.arff" % (entity_name, featureset, entity_name, fold)
os.system("python3 decisiontree.py -tr %s" % (training_data))
for fold in range(1,11):
testing_data = "../ARFF_Files/%s_ARFF/_%s/_test/%s_test-%i.arff" % (entity_name, featureset, entity_name, fold)
output = subprocess.check_output("python3 evaluate_decisiontree.py -te %s" % (testing_data), shell=True)
print(output.decode('utf-8'))
import graphviz
from sklearn.externals import joblib
from Tools import arff_converter
from sklearn import tree
featureset = "o"
entity_name = "adversereaction"
fold = 3
training_data = "../ARFF_Files/%s_ARFF/_%s/_train/%s_train-%i.arff" % (entity_name, featureset, entity_name, fold)
dataset = arff_converter.arff2df(training_data)
dtree = joblib.load('../Models/decisiontree/adversereaction_o/decisiontree_o_adversereaction_train-%i.arff.pkl' % fold)
tree.export_graphviz(dtree, out_file="visual/temptree.dot",
feature_names=dataset.columns.values[:-1],
class_names=["Entity", "Non-Entity"], label='all',
filled=True, rounded=True, proportion=False, leaves_parallel=True,
special_characters=True)
with open("visual/temptree.dot") as f:
dot_graph = f.read()
graphviz.Source(dot_graph)
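# Editorial sketch: inspect which features drive the tree's splits.
# feature_importances_ is standard on scikit-learn decision trees; the
# column names come from the ARFF-derived dataframe loaded above.
import numpy as np
importances = dtree.feature_importances_
for idx in np.argsort(importances)[::-1][:10]:
    print(dataset.columns.values[idx], round(float(importances[idx]), 4))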
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Comparison between extracted body text and raw text
Step2: By reading a few emails we can see that the extracted body text is just the text that the email sender wrote (as stated on Kaggle), while the raw text gathers the previously forwarded emails or the whole discussion. Note that the extracted body text can sometimes contain NaNs. Including repeated messages in the raw text would bias the distribution of the words, so we kept only the body text.
Step3: Comparison between the word clouds
Step4: Plotting the foreign policy
|
<ASSISTANT_TASK:>
Python Code:
from wordcloud import WordCloud
from nltk.corpus import stopwords
from nltk.sentiment import *
import pandas as pd
import numpy as np
import nltk
import time
import matplotlib.pyplot as plt
import seaborn as sns
import pycountry
%matplotlib inline
# import data
directory = 'hillary-clinton-emails/'
aliases = pd.read_csv(directory+'aliases.csv')
email_receivers = pd.read_csv(directory+'EmailReceivers.csv')
emails = pd.read_csv(directory+'Emails.csv')
persons = pd.read_csv(directory+'Persons.csv')
i = 2
print(emails['ExtractedBodyText'][i], '\n\n END OF BODY TEXT \n\n', emails['RawText'][i])
# raw corpus
text_corpus = emails.ExtractedBodyText.dropna().values
raw_text = ' '.join(text_corpus)
# generate wordcloud
wordcloud = WordCloud().generate(raw_text)
plt.figure(figsize=(15,10))
plt.imshow(wordcloud)
plt.axis('off');
def preprocess(text, stemmer):
print('Length of raw text: ', len(text))
# tokenization (need to install models/punkt from nltk.download())
tokens = nltk.word_tokenize(text, language='english')
print('Number of tokens extracted: ', len(tokens))
# stopwords removal (need to install stopwords corpus in corpora/stopwords)
# cach stopwords to improve performance (70x speedup)
cached_stopwords = set(stopwords.words('english'))
filtered_tokens = [word for word in tokens if word not in cached_stopwords]
print('Number of tokens after stopword removal: ', len(filtered_tokens))
# stemming
if stemmer == 'snowball':
stemmer = nltk.SnowballStemmer('english')
elif stemmer == 'porter':
stemmer = nltk.PorterStemmer('english')
else:
print('choose appropriate stemmer')
stemmed_filtered_tokens = [stemmer.stem(t) for t in filtered_tokens]
# dump array in text file
output = ' '.join(stemmed_filtered_tokens)
with open("preprocessed_text.txt", "w") as text_file:
text_file.write(output)
preprocess(raw_text, 'snowball')
preprocessed_text = open('preprocessed_text.txt').read()
wordcloud2 = WordCloud().generate(preprocessed_text)
plt.figure(figsize=(15,10))
plt.imshow(wordcloud2)
plt.axis('off');
def find_countries(tokens):
# find countries in a list of token
countries = []
for token in tokens:
try:
# search for any alpha_2 country name e.g. US, CH
country = pycountry.countries.get(alpha_2=token)
countries.append(country.name)
except KeyError:
try:
# search for any alpha_3 country name e.g. USA, CHE
country = pycountry.countries.get(alpha_3=token)
countries.append(country.name)
except KeyError:
try:
# search for a country by its name, title() upper cases every first letter but lower cases
# the other, hence it is handled last, but it deals with country written in lower case
country = pycountry.countries.get(name=token.title())
countries.append(country.name)
except KeyError: pass
return list(set(countries))
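# Quick sanity check of find_countries on a hand-made token list (editorial
# sketch; note it relies on pycountry raising KeyError for misses, as the
# try/except logic above assumes for older pycountry releases).
print(find_countries(['France', 'USA', 'CH', 'hello']))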
def foreign_policy(emails, sentiment_analyzer):
start_time = time.time()
words_to_be_removed = ["RE", "FM", "TV", "LA", "AL", "BEN", "AQ", "AND", "AM", "AT", "IN", "NO", "PM", "TO",
"BY", "IE", "IT", "MS"]
vader_analyzer = SentimentIntensityAnalyzer()
foreign_policy = {}
cached_stopwords = set(stopwords.words('english'))
cached_stopwords.update(words_to_be_removed)
i=0
for email in emails: # TODO: use a regex instead of token lookup because it takes too long
#print('{:d} / {:d} emails processed'.format(i, len(emails)))
tokens = nltk.word_tokenize(email, language='english')
tokens = [word for word in tokens if word not in cached_stopwords]
# country lookup in tokens
countries = find_countries(tokens)
i +=1
if not countries: continue
if sentiment_analyzer =='vader':
sentiment = vader_analyzer.polarity_scores(email)
score = sentiment['compound']
#elif sentiment_analyzer ==''
for country in countries:
if not country in foreign_policy.keys():
foreign_policy.update({country: [score, 1]})
else:
foreign_policy[country][0] += score
foreign_policy[country][1] += 1
for country, value in foreign_policy.items():
foreign_policy.update({country: [(value[0]/value[1]), value[1]]})
print("--- %d seconds elapsed ---" % (time.time() - start_time))
return foreign_policy
result = foreign_policy(text_corpus, sentiment_analyzer='vader')
result
pycountry.countries.get(name='Honduras')
def create_palette(sentiments):
color_palette = []
minimum = np.min(sentiments)
maximum = np.max(sentiments)
for sentiment in sentiments:
rescaled = (sentiment-minimum) / (maximum - minimum)
g = rescaled
r = 1 - g
color_palette.append((r,g,0))
return color_palette
df = pd.DataFrame.from_dict(result, orient='index')
df.reset_index(inplace=True)
df.columns =['Country', 'Sentiment', 'Count']
df = df[df['Count'] > 15]
df = df.sort_values('Sentiment', ascending=False)
gradient = create_palette(df['Sentiment'].values)
plt.figure(figsize=(15,7))
plot = sns.barplot(x='Country', y='Count', data=df, orient='vertical', palette=gradient)
plt.xticks(rotation=45);
plt.ylabel('Sentiment towards country');
pycountry.countries.get(name='Palau')
test_sentence = "and here I am AM TO speaking of France"
test_sentence = "This is a typical sentence, with don't. Punkts, something e.g. words US, U.S.A"
cached_stopwords = set(stopwords.words('english'))
words_to_be_removed = ["RE", "FM", "TV", "LA", "AL", "BEN", "AQ", "AND", "AM", "AT"]
cached_stopwords.update(words_to_be_removed)
tokens = nltk.word_tokenize(test_sentence)
#tokens = [word for word in tokens if word not in cached_stopwords]
countries = find_countries(tokens)
print(tokens)
test_sentence = 'This is a very pleasant day.'
#test_sentence = 'this is a completely neutral sentence'
polarity = {'Positive': 1, 'Neutral': 0, 'Negative': -1}
vader_analyzer = SentimentIntensityAnalyzer()
tokens = nltk.word_tokenize(test_sentence)
#tokens.remove('is')
result = ' '.join(tokens)
sentiment = vader_analyzer.polarity_scores(result)
#mean = -sentiment['neg'] + sentiment['pos']
#print(sentiment, mean)
np.max(sentiment.values())
test_set = ['nice nice good USA US switzerland', 'bad good bad bad bad libya', 'Switzerland good nice nice']
words_to_be_removed = ["RE", "FM", "TV", "LA", "AL", "BEN", "AQ"]
vader_analyzer = SentimentIntensityAnalyzer()
country_counts = {}
country_sentiments = {}
foreign_policy = {}
for email in test_set:
tokens = nltk.word_tokenize(email, language='english')
tokens = [word for word in tokens if word not in words_to_be_removed]
clean_email = ' '.join(tokens)
sentiment = vader_analyzer.polarity_scores(clean_email)
score = sentiment['compound']
# country lookup in raw text
countries = find_countries(tokens)
for country in countries:
if not country in foreign_policy.keys():
foreign_policy.update({country: [score, 1]})
else:
foreign_policy[country][0] += score
foreign_policy[country][1] += 1
for country, value in foreign_policy.items():
foreign_policy.update({country: [(value[0]/value[1]), value[1]]})
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Can you describe what this code did?
Step2: Excellent! You have just written and executed your very first program! Please make sure to run every single one of the following code blocks in the same manner; otherwise a lot of the examples won't work properly.
Step3: If you vaguely remember your math-classes in school, this should look familiar. It is basically the same notation with the name of the variable on the left, the value on the right, and the = sign in the middle.
Step4: So far, we have only used a variable called x. Nevertheless, we are entirely free to change the names of our variables, as long as these names do not contain strange characters, such as spaces, numbers or punctuation marks. (Underscores, however, are allowed inside names!) In the following block, we assign the outcome of our calculation to a variable that has a more meaningful name than the abstract name x.
Step5: In Python we can also copy the contents of a variable into another variable, which is what happens in the block below. You should of course watch out in such cases: as the code shows, changing the original variable afterwards does not affect the copy.
Step6: Remember
Step7: Variables are also case-sensitive: accessing months is not the same as Months
Step8: So far we have only assigned numbers such as 2 or 70560 to our variables. Such whole numbers are called 'integers' in programming, because they don't have any digits 'after the dot'. Numbers that do have digits after the dot (e.g. 67.278 or 191.200) are called 'floating-point numbers' in programming, or simply 'floats'. Note that Python uses dots in floats, whereas some European languages use a comma here. Both integers and floats can be positive numbers (e.g. 70 or 4.36) as well as negative numbers (e.g. -70 or -4.36). You can just as easily assign floats to variables
Step9: On the whole, the difference between integers and floats is of course important for divisions where you often end up with floats
Step10: You will undoubtedly remember from your math classes in high school that there is something called 'operator precedence', meaning that multiplication, for instance, will always be executed before subtraction. In Python you can explicitly set the order in which arithmetic operations are executed, using round brackets. Compare the following lines of code
Step11: Using the operators we have learned about above, we can change the variables in our code as many times as we want. We can assign new values to old variables, just like we can put new or more things in the boxes which we already had. Say, for instance, that yesterday we counted how many books we have in our office and that we stored this count in our code as follows
Step12: Suppose that we buy a new book for the office today
Step13: Updates like these happen a lot. Python therefore provides a shortcut and you can write the same thing using +=.
Step14: This special shortcut (+=) is called an operator too. Apart from addition (+=), this shortcut operator has variants for subtraction (-=), multiplication (*=) and division (/=) too
Step15: What we have learnt
Step16: Such a piece of text ("The Lord of the Flies") is called a 'string' in Python (cf. a string of characters). Strings in Python must always be enclosed with 'quotes' (either single or double quotes). Without those quotes, Python will think it's dealing with the name of some variable that has been defined earlier, because variable names never take quotes. The following distinction is confusing, but extremely important
Step17: Some of the arithmetic operators we saw earlier can also be used to do useful things with strings. Both the multiplication operator (*) and the addition operator (+) provide interesting functionality for dealing with strings, as the block below illustrates.
Step18: Adding strings together is called 'string concatenation' or simply 'concatenation' in programming. Use the block below to find out whether you can also use the shortcut += operator for adding an 'h' to the variable original_string. Don't forget to check the result by printing it!
Step19: We now would like you to write some code that defines a variable, name, and assign to it a string that is your name. If your first name is shorter than 5 characters, use your last name. If your last name is also shorter than 5 characters, use the combination of your first and last name. Now print the variable containing your name to the screen.
Step20: Strings are called strings because they consist of a series (or 'string') of individual characters. We can access these individual characters in Python with the help of 'indexing', because each character in a string has a unique 'index'. To print the first letter of your name, you can type
Step21: Take a look at the string "Mr White". We use the index 0 to access the first character in the string. This might seem odd, but remember that all indexes in Python start at zero. Whenever you count in Python, you start at 0 instead of 1. Note that the space character gets an index too, namely 2. This is something you will have to get used to!
Step22: It is rather inconvenient having to know how long our strings are if we want to find out what its last letter is. Python provides a simple way of accessing a string 'from the rear'
Step23: To access the last character in a string you have to use the index [-1]. Alternatively, there is the len() command which returns the length of a string
Step24: Do you understand the following code block? Can you explain what is happening?
Step25: Now can you write some code that defines a variable but_last_letter and assigns to this variable the one but last letter of your name?
Step26: You're starting to become a real expert in indexing strings. Now what if we would like to find out what the last two or three letters of our name are? In Python we can use so-called 'slice-indexes' or 'slices' for short. To find the first two letters of our name we type in
Step27: The 0 index is optional, so we could just as well type in name[
Step28: Because we did not specify the end index, Python continues until it reaches the end of our string. If we would like to find out what the last two letters of our name are, we can type in
Step29: DIY
Step30: Given the following two words, can you write code that prints out the word humanities using only slicing and concatenation? (So, no quotes are allowed in your code.) Can you print out how many characters the word humanities counts?
Step31: "Casting" variables
Step32: This should raise an error on your machine
Step34: Other types of conversions are possible as well, and we will see a couple of them in the next chapters. Because variables can change data type anytime they want, we say that Python uses 'dynamic typing', as opposed to stricter languages that use 'static typing'. You can check a variable's type using the type() command.
Step35: So, how many ways are there to comment on your code in Python?
Step36: Ex. 2
Step37: Ex. 3
Step38: Ex. 4
Step39: Ex. 5
Step40: Ex. 6
Step41: Ex. 7
Step42: Ex. 8
Step43: Ex. 9
Step44: You've reached the end of Chapter 1! You can safely ignore the code block below -- it's only there to make the page prettier.
|
<ASSISTANT_TASK:>
Python Code:
print("Mike")
# insert your own code here!
x = 5
print(x)
x = 2
print(x)
print(x * x)
print(x + x)
print(x - 6)
seconds_in_seven_weeks = 70560
print(seconds_in_seven_weeks)
first_number = 5
second_number = first_number
first_number = 3
print(first_number)
print(second_number)
# not recommended...
months = 70560
print(months)
print(months)
print(Months)
some_float = 23.987
print(some_float)
some_float = -4.56
print(some_float)
x = 5/2
print(x)
nr1 = 10-2/4
nr2 = (10-2)/4
nr3 = 10-(2/4)
print(nr1)
print(nr2)
print(nr3)
number_of_books = 100
number_of_books = number_of_books + 1
print(number_of_books)
number_of_books += 5
print(number_of_books)
number_of_books -= 5
print(number_of_books)
number_of_books *= 2
print(number_of_books)
number_of_books /= 2
print(number_of_books)
book = "The Lord of the Flies"
print(book)
name = "Bonny"
Bonny = "name"
Clyde = "Clyde"
print(name)
print (Bonny)
print(Clyde)
original_string = "bla"
new_string = 2*original_string
print(new_string)
new_string = new_string+"h"
print(new_string)
original_string = "blabla"
# add an 'h'...
print(original_string)
# your name code goes here...
first_letter = name[0]
print(first_letter)
last_letter = name[# fill in the last index of your name (tip: indexes start at 0)]
print(last_letter)
last_letter = name[-1]
print(last_letter)
print(len(name))
print(name[len(name)-1])
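# In words: len(name) - 1 is the index of the last character, because
# indexing starts at 0 while len() counts from 1, so name[len(name)-1]
# and name[-1] refer to the same letter.
print(name[-1] == name[len(name) - 1])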
but_last_letter = name[# insert your code here]
print(but_last_letter)
first_two_letters = name[0:2]
print(first_two_letters)
without_first_two_letters = name[2:]
print(without_first_two_letters)
last_two_letters = name[-2:]
print(last_two_letters)
# insert your middle_letters code here
word1 = "human"
word2 = "opportunities"
x = "5"
y = 2
print(x + y)
x = "5"
y = 2
print(x + str(y))
print(int(x) + y)
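# The same casting idea works for floats (editorial sketch): float() turns
# number-like strings into floating-point numbers.
z = "4.2"
print(float(z) + y)
print(str(y) + z)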
# comment: insert your code here.
# BTW: Have you noticed that everything behind the hashtag
print("Something...") # on a line is ignored by your python interpreter?
print("and something else..") # this is really helpful to comment on your code!
"""Another way
of commenting on your code is via
triple quotes -- these can be distributed over multiple lines"""
print("Done.")
# your code goes here
print("A message").
print("A message')
print('A messagef"')
# ZeroDivisionError
# insert your code here
# numbers
# average
# circle code
# try out the modulus operator!
# cashier code
from IPython.core.display import HTML
def css_styling():
styles = open("styles/custom.css", "r").read()
return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Logging
Step2: Initialization
Step3: Mapping gene ID --> sequence
Step4: Mapping representative sequence --> structure
Step5: Homology models
Step6: Downloading and ranking structures
Step7: Computing and storing protein properties
Step8: Additional annotations
Step9: Adding more properties
Step10: Global protein properties
Step11: Local protein properties
Step12: Column definitions
Step13: Comparing features in different structures of the same protein
|
<ASSISTANT_TASK:>
Python Code:
import sys
import logging
# Import the GEM-PRO class
from ssbio.pipeline.gempro import GEMPRO
# Printing multiple outputs per cell
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Create logger
logger = logging.getLogger()
logger.setLevel(logging.INFO) # SET YOUR LOGGING LEVEL HERE #
# Other logger stuff for Jupyter notebooks
handler = logging.StreamHandler(sys.stderr)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] %(levelname)s: %(message)s', datefmt="%Y-%m-%d %H:%M")
handler.setFormatter(formatter)
logger.handlers = [handler]
# SET FOLDERS AND DATA HERE
import tempfile
ROOT_DIR = tempfile.gettempdir()
PROJECT = 'ssbio_protein_properties'
LIST_OF_GENES = ['b1276', 'b0118']
# Create the GEM-PRO project
my_gempro = GEMPRO(gem_name=PROJECT, root_dir=ROOT_DIR, genes_list=LIST_OF_GENES, pdb_file_type='pdb')
# UniProt mapping
my_gempro.uniprot_mapping_and_metadata(model_gene_source='ENSEMBLGENOME_ID')
print('Missing UniProt mapping: ', my_gempro.missing_uniprot_mapping)
my_gempro.df_uniprot_metadata.head()
# Set representative sequences
my_gempro.set_representative_sequence()
print('Missing a representative sequence: ', my_gempro.missing_representative_sequence)
my_gempro.df_representative_sequences.head()
# Mapping using the PDBe best_structures service
my_gempro.map_uniprot_to_pdb(seq_ident_cutoff=.3)
my_gempro.df_pdb_ranking.head()
# Mapping using BLAST
my_gempro.blast_seqs_to_pdb(all_genes=True, seq_ident_cutoff=.7, evalue=0.00001)
my_gempro.df_pdb_blast.head(2)
import pandas as pd
import os.path as op
# Creating manual mapping dictionary for ECOLI I-TASSER models
homology_models = '/home/nathan/projects_archive/homology_models/ECOLI/zhang/'
homology_models_df = pd.read_csv('/home/nathan/projects_archive/homology_models/ECOLI/zhang_data/160804-ZHANG_INFO.csv')
tmp = homology_models_df[['zhang_id','model_file','m_gene']].drop_duplicates()
tmp = tmp[pd.notnull(tmp.m_gene)]
homology_model_dict = {}
for i,r in tmp.iterrows():
homology_model_dict[r['m_gene']] = {r['zhang_id']: {'model_file':op.join(homology_models, r['model_file']),
'file_type':'pdb'}}
my_gempro.get_manual_homology_models(homology_model_dict)
# Creating manual mapping dictionary for ECOLI SUNPRO models
homology_models = '/home/nathan/projects_archive/homology_models/ECOLI/sunpro/'
homology_models_df = pd.read_csv('/home/nathan/projects_archive/homology_models/ECOLI/sunpro_data/160609-SUNPRO_INFO.csv')
tmp = homology_models_df[['sunpro_id','model_file','m_gene']].drop_duplicates()
tmp = tmp[pd.notnull(tmp.m_gene)]
homology_model_dict = {}
for i,r in tmp.iterrows():
homology_model_dict[r['m_gene']] = {r['sunpro_id']: {'model_file':op.join(homology_models, r['model_file']),
'file_type':'pdb'}}
my_gempro.get_manual_homology_models(homology_model_dict)
# Download all mapped PDBs and gather the metadata
my_gempro.download_all_pdbs()
my_gempro.df_pdb_metadata.head(2)
# Set representative structures
my_gempro.set_representative_structure()
my_gempro.df_representative_structures.head()
# Requires EMBOSS "pepstats" program
# See the ssbio wiki for more information: https://github.com/SBRG/ssbio/wiki/Software-Installations
# Install using:
# sudo apt-get install emboss
my_gempro.get_sequence_properties()
# Requires SCRATCH installation, replace path_to_scratch with own path to script
# See the ssbio wiki for more information: https://github.com/SBRG/ssbio/wiki/Software-Installations
my_gempro.get_scratch_predictions(path_to_scratch='scratch',
results_dir=my_gempro.data_dir,
num_cores=4)
my_gempro.find_disulfide_bridges(representatives_only=False)
# Requires DSSP installation
# See the ssbio wiki for more information: https://github.com/SBRG/ssbio/wiki/Software-Installations
my_gempro.get_dssp_annotations()
# Requires MSMS installation
# See the ssbio wiki for more information: https://github.com/SBRG/ssbio/wiki/Software-Installations
my_gempro.get_msms_annotations()
# for g in my_gempro.genes_with_a_representative_sequence:
# g.protein.representative_sequence.feature_path = '/path/to/new/feature/file.gff'
# Kyte-Doolittle scale for hydrophobicity
kd = { 'A': 1.8,'R':-4.5,'N':-3.5,'D':-3.5,'C': 2.5,
'Q':-3.5,'E':-3.5,'G':-0.4,'H':-3.2,'I': 4.5,
'L': 3.8,'K':-3.9,'M': 1.9,'F': 2.8,'P':-1.6,
'S':-0.8,'T':-0.7,'W':-0.9,'Y':-1.3,'V': 4.2 }
# Use Biopython to calculate hydrophobicity using a set sliding window length
from Bio.SeqUtils.ProtParam import ProteinAnalysis
window = 7
for g in my_gempro.genes_with_a_representative_sequence:
# Create a ProteinAnalysis object -- see http://biopython.org/wiki/ProtParam
my_seq = g.protein.representative_sequence.seq_str
analysed_seq = ProteinAnalysis(my_seq)
# Calculate scale
hydrophobicity = analysed_seq.protein_scale(param_dict=kd, window=window)
# Correct list length by prepending and appending "inf" (result needs to be same length as sequence)
for i in range(window//2):
hydrophobicity.insert(0, float("Inf"))
hydrophobicity.append(float("Inf"))
# Add new annotation to the representative sequence's "letter_annotations" dictionary
g.protein.representative_sequence.letter_annotations['hydrophobicity-kd'] = hydrophobicity
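# Editorial sketch: visualise the profile just stored for the first gene.
# The Inf padding added above is masked to None so it does not break the
# y-axis; matplotlib renders None values as gaps.
import matplotlib.pyplot as plt
g = my_gempro.genes_with_a_representative_sequence[0]
profile = g.protein.representative_sequence.letter_annotations['hydrophobicity-kd']
masked = [v if v != float("Inf") else None for v in profile]
plt.figure(figsize=(12, 3))
plt.plot(masked)
plt.xlabel('residue position')
plt.ylabel('Kyte-Doolittle score (window={})'.format(window))
plt.title(g.id)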
# Printing all global protein properties
from pprint import pprint
# Only looking at 2 genes for now, remove [:2] to gather properties for all
for g in my_gempro.genes_with_a_representative_sequence[:2]:
repseq = g.protein.representative_sequence
repstruct = g.protein.representative_structure
repchain = g.protein.representative_chain
print('Gene: {}'.format(g.id))
print('Number of structures: {}'.format(g.protein.num_structures))
print('Representative sequence: {}'.format(repseq.id))
print('Representative structure: {}'.format(repstruct.id))
print('----------------------------------------------------------------')
print('Global properties of the representative sequence:')
pprint(repseq.annotations)
print('----------------------------------------------------------------')
print('Global properties of the representative structure:')
pprint(repstruct.chains.get_by_id(repchain).seq_record.annotations)
print('****************************************************************')
print('****************************************************************')
print('****************************************************************')
# Looking at all features
for g in my_gempro.genes_with_a_representative_sequence[:2]:
g.id
# UniProt features
[x for x in g.protein.representative_sequence.features]
# Catalytic site atlas features
for s in g.protein.structures:
if s.structure_file:
for c in s.mapped_chains:
if s.chains.get_by_id(c).seq_record:
if s.chains.get_by_id(c).seq_record.features:
[x for x in s.chains.get_by_id(c).seq_record.features]
metal_info = []
for g in my_gempro.genes:
for f in g.protein.representative_sequence.features:
if 'metal' in f.type.lower():
res_info = g.protein.get_residue_annotations(f.location.end, use_representatives=True)
res_info['gene_id'] = g.id
res_info['seq_id'] = g.protein.representative_sequence.id
res_info['struct_id'] = g.protein.representative_structure.id
res_info['chain_id'] = g.protein.representative_chain
metal_info.append(res_info)
cols = ['gene_id', 'seq_id', 'struct_id', 'chain_id',
'seq_residue', 'seq_resnum', 'struct_residue','struct_resnum',
'seq_SS-sspro','seq_SS-sspro8','seq_RSA-accpro','seq_RSA-accpro20',
'struct_SS-dssp','struct_RSA-dssp', 'struct_ASA-dssp',
'struct_PHI-dssp', 'struct_PSI-dssp', 'struct_CA_DEPTH-msms', 'struct_RES_DEPTH-msms']
pd.DataFrame.from_records(metal_info, columns=cols).set_index(['gene_id', 'seq_id', 'struct_id', 'chain_id', 'seq_resnum'])
for g in my_gempro.genes:
# Gather residue numbers
metal_binding_structure_residues = []
for f in g.protein.representative_sequence.features:
if 'metal' in f.type.lower():
res_info = g.protein.get_residue_annotations(f.location.end, use_representatives=True)
metal_binding_structure_residues.append(res_info['struct_resnum'])
print(metal_binding_structure_residues)
# Display structure
view = g.protein.representative_structure.view_structure()
g.protein.representative_structure.add_residues_highlight_to_nglview(view=view, structure_resnums=metal_binding_structure_residues)
view
# Run all sequence to structure alignments
for g in my_gempro.genes:
for s in g.protein.structures:
g.protein.align_seqprop_to_structprop(seqprop=g.protein.representative_sequence, structprop=s)
metal_info_compared = []
for g in my_gempro.genes:
for f in g.protein.representative_sequence.features:
if 'metal' in f.type.lower():
for s in g.protein.structures:
for c in s.mapped_chains:
res_info = g.protein.get_residue_annotations(seq_resnum=f.location.end,
seqprop=g.protein.representative_sequence,
structprop=s, chain_id=c,
use_representatives=False)
res_info['gene_id'] = g.id
res_info['seq_id'] = g.protein.representative_sequence.id
res_info['struct_id'] = s.id
res_info['chain_id'] = c
metal_info_compared.append(res_info)
cols = ['gene_id', 'seq_id', 'struct_id', 'chain_id',
'seq_residue', 'seq_resnum', 'struct_residue','struct_resnum',
'seq_SS-sspro','seq_SS-sspro8','seq_RSA-accpro','seq_RSA-accpro20',
'struct_SS-dssp','struct_RSA-dssp', 'struct_ASA-dssp',
'struct_PHI-dssp', 'struct_PSI-dssp', 'struct_CA_DEPTH-msms', 'struct_RES_DEPTH-msms']
pd.DataFrame.from_records(metal_info_compared, columns=cols).sort_values(by=['seq_resnum','struct_id','chain_id']).set_index(['gene_id','seq_id','seq_resnum','seq_residue','struct_id'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Upload all statistical maps into the data folder
Step2: 3. Run the visualization script
Step3: 4. Look at your data
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
stats_file = '../test_data/ALL_N95_Mean_cope2_thresh_zstat1.nii.gz'
view = 'ortho'
colormap = 'RdBu_r'
threshold = '2.3'
black_bg
%run ../scripts/mni_glass_brain.py --cbar --display_mode $view --cmap $colormap --thr_abs $threshold $stats_file
from IPython.display import Image, display
from glob import glob as gg
outputs = gg('../test_data/*ortho.png')
for o in outputs:
a = Image(filename=o)
display(a)
from nilearn import plotting  # the visualization script builds on nilearn's plotting module
plotting.plot_glass_brain??
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Look at Chemicals
Step2: Look at a grid of chemicals
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from rdkit import Chem
from rdkit.Chem import Draw
%matplotlib inline
m = Chem.MolFromSmiles('Cc1ccccc1')
Chem.Kekulize(m)
Chem.MolToSmiles(m,kekuleSmiles=True)
fig = Draw.MolToMPL(m)
m2 = Chem.MolFromSmiles('C1=C2C(=CC(=C1Cl)Cl)OC3=CC(=C(C=C3O2)Cl)Cl')
fig2 = Draw.MolToMPL(m2)
m3 = Chem.MolFromSmiles('O=C1OC2=C(C=C1)C1=C(C=CCO1)C=C2')
fig3 = Draw.MolToMPL(m3)
smiles = ("O=C(NCc1cc(OC)c(O)cc1)CCCC/C=C/C(C)C", "CC(C)CCCCCC(=O)NCC1=CC(=C(C=C1)O)OC", "c1(C(=O)O)cc(OC)c(O)cc1")
mols = [Chem.MolFromSmiles(x) for x in smiles]
Draw.MolsToGridImage(mols)
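# Editorial sketch: two standard RDKit descriptors for the molecules in the
# grid (molecular weight and Crippen logP).
from rdkit.Chem import Descriptors
for smi, mol in zip(smiles, mols):
    print(smi, round(Descriptors.MolWt(mol), 2), round(Descriptors.MolLogP(mol), 2))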
suppl = Chem.SDMolSupplier('data/cdk2.sdf')
d_train = pd.read_csv("train-0.1m.csv")
d_test = pd.read_csv("test.csv")
d_train_test = d_train.append(d_test)
vars_categ = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
def get_dummies(d, col):
dd = pd.get_dummies(d.ix[:, col])
dd.columns = [col + "_%s" % c for c in dd.columns]
return(dd)
%time X_train_test_categ = pd.concat([get_dummies(d_train_test, col) for col in vars_categ], axis = 1)
X_train_test = pd.concat([X_train_test_categ, d_train_test.ix[:,vars_num]], axis = 1)
y_train_test = np.where(d_train_test["dep_delayed_15min"]=="Y", 1, 0)
X_train = X_train_test[0:d_train.shape[0]]
y_train = y_train_test[0:d_train.shape[0]]
X_test = X_train_test[d_train.shape[0]:]
y_test = y_train_test[d_train.shape[0]:]
md = LogisticRegression(tol=0.00001, C=1000)
%time md.fit(X_train, y_train)
phat = md.predict_proba(X_test)[:,1]
metrics.roc_auc_score(y_test, phat)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exporting surfaces to Blender
Step2: Editing in Blender
Step3: Back in Python, you can read the fixed .obj files and save them as FreeSurfer surface files
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Ezequiel Mikulan <e.mikulan@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import shutil
import mne
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, 'sample', 'bem')
# Put the converted surfaces in a separate 'conv' folder
conv_dir = op.join(subjects_dir, 'sample', 'conv')
os.makedirs(conv_dir, exist_ok=True)
# Load the inner skull surface and create a problem
coords, faces = mne.read_surface(op.join(bem_dir, 'inner_skull.surf'))
coords[0] *= 1.1 # Move the first vertex outside the skull
# Write the inner skull surface as an .obj file that can be imported by
# Blender.
mne.write_surface(op.join(conv_dir, 'inner_skull.obj'), coords, faces,
overwrite=True)
# Also convert the outer skull surface.
coords, faces = mne.read_surface(op.join(bem_dir, 'outer_skull.surf'))
mne.write_surface(op.join(conv_dir, 'outer_skull.obj'), coords, faces,
overwrite=True)
coords, faces = mne.read_surface(op.join(conv_dir, 'inner_skull.obj'))
coords[0] /= 1.1 # Move the first vertex back inside the skull
mne.write_surface(op.join(conv_dir, 'inner_skull_fixed.obj'), coords, faces,
overwrite=True)
# Read the fixed surface
coords, faces = mne.read_surface(op.join(conv_dir, 'inner_skull_fixed.obj'))
# Backup the original surface
shutil.copy(op.join(bem_dir, 'inner_skull.surf'),
op.join(bem_dir, 'inner_skull_orig.surf'))
# Overwrite the original surface with the fixed version
mne.write_surface(op.join(bem_dir, 'inner_skull.surf'), coords, faces,
overwrite=True)
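# Editorial sanity check against the backup made above: the topology should
# be unchanged and only the first vertex coordinates should differ.
orig_coords, orig_faces = mne.read_surface(op.join(bem_dir, 'inner_skull_orig.surf'))
assert orig_coords.shape == coords.shape
assert (orig_faces == faces).all()
print('First vertex, original vs. fixed:', orig_coords[0], coords[0])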
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This is a very small dataset.
Step2: About 63% of all observations are good.
Step3: Set Global Parameters
Step5: Train Classifier
Step6: What's the best dimensionality reduction with a single autoencoder?
Step7: The result is implausible. This might be due to a very small number of observations.
|
<ASSISTANT_TASK:>
Python Code:
# data = np.genfromtxt("data/ionosphere.data")
data = pd.read_csv('data/ionosphere.data', sep=",", header=None)
data.head()
data.describe()
df_tab = data
df_tab[34] = df_tab[34].astype('category')
tab = pd.crosstab(index=df_tab[34], columns="frequency")
tab.index.name = 'Class/Direction'
tab/tab.sum()
data.drop(data.columns[1], inplace=True, axis=1)  # the second ionosphere attribute is constant zero, so it carries no information
data[34] = [1 if e == "g" else 0 for e in data[34]]  # use == for string comparison, not "is"
# sample the dataframe
data_train = data.sample(frac=0.9, random_state=seed)
data_valid = data.drop(data_train.index)
df_x_train = data_train.iloc[:,:-1]
df_x_train = df_x_train.transform(lambda x: (x - x.min()) / (x.max() - x.min()))
df_y_train = data_train.iloc[:,-1]
df_x_valid = data_valid.iloc[:,:-1]
df_x_valid = df_x_valid.transform(lambda x: (x - x.min()) / (x.max() - x.min()))
df_y_valid = data_valid.iloc[:,-1]
df_x_train.describe()
df_y_train.sum()/len(df_y_train)
x_train = np.array(df_x_train.as_matrix())
y_train = np.array(pd.DataFrame(df_y_train).as_matrix())
x_val = np.array(df_x_valid.as_matrix())
y_val = np.array(pd.DataFrame(df_y_valid).as_matrix())
y_eval = y_val
y_train = keras.utils.to_categorical(y_train, 2)
y_val = keras.utils.to_categorical(y_val, 2)
epochsize = 60
batchsize = 4
shuffle = False
dropout = 0.1
num_classes = 2
input_dim = x_train.shape[1]
hidden1_dim = 40
hidden2_dim = 40
# weights = keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=42)
input_data = Input(shape=(input_dim,), dtype='float32', name='main_input')
hidden_layer1 = Dense(hidden1_dim
, activation='relu'
, input_shape=(input_dim,)
# , kernel_initializer=weights
)(input_data)
dropout1 = Dropout(dropout)(hidden_layer1)
hidden_layer2 = Dense(hidden2_dim
, activation='relu'
, input_shape=(input_dim,)
# , kernel_initializer=weights
)(dropout1)
dropout2 = Dropout(dropout)(hidden_layer2)
output_layer = Dense(num_classes
, activation='sigmoid'
# , kernel_initializer=weights
)(dropout2)
model = Model(inputs=input_data, outputs=output_layer)
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
plot_model(model, to_file='images/ionosphere_nn.png', show_shapes=True, show_layer_names=True)
IPython.display.Image("images/ionosphere_nn.png")
model.fit(x_train, y_train,
batch_size=batchsize,
epochs=epochsize,
verbose=0,
shuffle=shuffle,
validation_split=0.05)
nn_score = model.evaluate(x_val, y_val)[1]
print(nn_score)
fig = plt.figure(figsize=(20,10))
plt.plot(model.history.history['val_acc'])
plt.plot(model.history.history['acc'])
plt.axhline(y=nn_score, c="red")
plt.text(0, nn_score, "test: " + str(round(nn_score, 4)), fontdict=font)
plt.title('model accuracy for neural net with 2 hidden layers')
plt.ylabel('accuracy')
plt.xlabel('epochs')
plt.legend(['valid', 'train'], loc='lower right')
plt.show()
import itertools
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_eval, model.predict(x_val).argmax(axis=-1))
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure(figsize=(16,8))
plot_confusion_matrix(cnf_matrix, classes=['bad', 'good'],
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure(figsize=(16,8))
plot_confusion_matrix(cnf_matrix, classes=['bad', 'good'], normalize=True,
title='Normalized confusion matrix')
# the initial coding dimension s.t. there is no dim reduction at the beginning
encoding_dim = input_dim
result = {'encoding_dim': [], 'auto_classifier_acc': []}
while encoding_dim > 0:
main_input = Input(shape=(input_dim,), dtype='float32', name='main_input')
encoding_layer = Dense(encoding_dim
, activation='relu'
, name='encoder'
# , kernel_initializer='normal'
)
encoding_layer_output = encoding_layer(main_input)
decoding_layer_output = Dense(input_dim, activation='sigmoid'
,name='decoder_output'
# ,kernel_initializer='normal'
)(encoding_layer_output)
x = Dense(hidden1_dim
, activation='relu'
# , kernel_initializer=weights
)(encoding_layer_output)
x = Dropout(dropout)(x)
x = Dense(hidden2_dim
, activation='relu'
# , kernel_initializer=weights
)(x)
x = Dropout(dropout)(x)
classifier_output = Dense(num_classes
, activation='sigmoid'
, name='main_output'
# , kernel_initializer=weights
)(x)
auto_classifier = Model(inputs=main_input, outputs=[classifier_output, decoding_layer_output])
auto_classifier.compile(optimizer=RMSprop(),
loss={'main_output': 'binary_crossentropy', 'decoder_output': 'mean_squared_error'},
loss_weights={'main_output': .2, 'decoder_output': .8},
metrics=['accuracy'])
auto_classifier.fit({'main_input': x_train},
{'main_output': y_train, 'decoder_output': x_train},
epochs=epochsize,
batch_size=batchsize,
shuffle=shuffle,
validation_split=0.05,
verbose=0)
accuracy = auto_classifier.evaluate(x=x_val, y=[y_val, x_val], verbose=1)[3]
result['encoding_dim'].append(encoding_dim)
result['auto_classifier_acc'].append(accuracy)
encoding_dim -=1
result_df = pd.DataFrame(result)
result_df['neural_net_acc'] = nn_score
result_df.head()
fig = plt.figure(figsize=(20,10))
plt.bar(result_df['encoding_dim'], result_df['auto_classifier_acc'])
plt.axhline(y=result_df['neural_net_acc'][0], c="red")
plt.text(0, result_df['neural_net_acc'][0], "best neural net: " + str(round(result_df['neural_net_acc'][0], 4))
,fontdict=font)
plt.title('model accuracy for different encoding dimensions')
plt.ylabel('accuracy')
plt.xlabel('dimension')
plt.ylim(0.6, 1)
result_df.to_csv('results/ionosphere_results.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run the following cell to create your Cloud Storage bucket if it does not already exist.
Step2: Timestamp
Step3: Import libraries
Step4: Download and preprocess the data
Step5: Read the data with Pandas
Step6: Next, you will separate the data into features ('data') and labels ('labels').
Step7: Split data into train and test sets
Step8: Build, train, and evaluate our model with Keras
Step9: Create an input data pipeline with tf.data
Step10: Train the model
Step11: Evaluate the trained model locally
Step12: Export the model as a TF 2.x SavedModel
Step13: Use TensorFlow's saved_model_cli to inspect the model's SignatureDef. We'll use this information when we deploy our model to AI Explanations in the next section.
Step14: Deploy the model to AI Explanations
Step15: Since this is a regression model (predicting a numerical value), the baseline prediction will be the same for every example we send to the model. If this were instead a classification model, each class would have a different baseline prediction.
Step16: Create the model version
Step17: Get predictions and explanations
Step18: Send the explain request
Step19: Understanding the explanations response
Step20: Next let's look at the feature attributions for this particular example. Positive attribution values mean a particular feature pushed your model prediction up by that amount, and vice versa for negative attribution values.
Step21: Check your explanations and baselines
Step22: In the function below you perform two sanity checks for models using Integrated Gradient (IG) explanations and one sanity check for models using Sampled Shapley.
Step23: Understanding AI Explanations with the What-If Tool
Step24: What-If Tool visualization ideas
|
<ASSISTANT_TASK:>
Python Code:
import os
PROJECT_ID = "dougkelly-sandbox" # TODO: your PROJECT_ID here.
os.environ["PROJECT_ID"] = PROJECT_ID
BUCKET_NAME = "xai-labs" # TODO: your BUCKET_NAME here.
REGION = "us-central1"
os.environ['BUCKET_NAME'] = BUCKET_NAME
os.environ['REGION'] = REGION
%%bash
exists=$(gsutil ls -d | grep -w gs://${BUCKET_NAME}/)
if [ -n "$exists" ]; then
echo -e "Bucket gs://${BUCKET_NAME} already exists."
else
echo "Creating a new GCS bucket."
gsutil mb -l ${REGION} gs://${BUCKET_NAME}
echo -e "\nHere are your current buckets:"
gsutil ls
fi
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
import tensorflow as tf
import pandas as pd
# should be >= 2.1
print("Tensorflow version " + tf.__version__)
if tf.__version__ < "2.1":
raise Exception("TF 2.1 or greater is required")
!pip install explainable-ai-sdk
import explainable_ai_sdk
# Copy the data to your notebook instance
! gsutil cp 'gs://explanations_sample_data/bike-data.csv' ./
data = pd.read_csv('bike-data.csv')
# Shuffle the data
data = data.sample(frac=1, random_state=2)
# Drop rows with null values
data = data[data['wdsp'] != 999.9]
data = data[data['dewp'] != 9999.9]
# Rename some columns for readability
data = data.rename(columns={'day_of_week': 'weekday'})
data = data.rename(columns={'max': 'max_temp'})
data = data.rename(columns={'dewp': 'dew_point'})
# Drop columns you won't use to train this model
data = data.drop(columns=['start_station_name', 'end_station_name', 'bike_id', 'snow_ice_pellets'])
# Convert trip duration from seconds to minutes so it's easier to understand
data['duration'] = data['duration'].apply(lambda x: float(x / 60))
# Preview the first 5 rows of training data
data.head()
# Save duration to its own DataFrame and remove it from the original DataFrame
labels = data['duration']
data = data.drop(columns=['duration'])
# Use 80/20 train/test split
train_size = int(len(data) * .8)
print("Train size: %d" % train_size)
print("Test size: %d" % (len(data) - train_size))
# Split your data into train and test sets
train_data = data[:train_size]
train_labels = labels[:train_size]
test_data = data[train_size:]
test_labels = labels[train_size:]
# Build your model
model = tf.keras.Sequential(name="bike_predict")
model.add(tf.keras.layers.Dense(64, input_dim=len(train_data.iloc[0]), activation='relu'))
model.add(tf.keras.layers.Dense(32, activation='relu'))
model.add(tf.keras.layers.Dense(1))
# Compile the model and see a summary
optimizer = tf.keras.optimizers.Adam(0.001)
model.compile(loss='mean_squared_logarithmic_error', optimizer=optimizer)
model.summary()
batch_size = 256
epochs = 3
input_train = tf.data.Dataset.from_tensor_slices(train_data)
output_train = tf.data.Dataset.from_tensor_slices(train_labels)
input_train = input_train.batch(batch_size).repeat()
output_train = output_train.batch(batch_size).repeat()
train_dataset = tf.data.Dataset.zip((input_train, output_train))
# This will take about a minute to run
# To keep training time short, you're not using the full dataset
model.fit(train_dataset, steps_per_epoch=train_size // batch_size, epochs=epochs)
# Run evaluation
results = model.evaluate(test_data, test_labels)
print(results)
# Send test instances to model for prediction
predict = model.predict(test_data[:5])
# Preview predictions on the first 5 examples from your test dataset
for i, val in enumerate(predict):
print('Predicted duration: {}'.format(round(val[0])))
print('Actual duration: {} \n'.format(test_labels.iloc[i]))
export_path = 'gs://' + BUCKET_NAME + '/explanations/mymodel'
model.save(export_path)
print(export_path)
! saved_model_cli show --dir $export_path --all
# Print the names of your tensors
print('Model input tensor: ', model.input.name)
print('Model output tensor: ', model.output.name)
from explainable_ai_sdk.metadata.tf.v2 import SavedModelMetadataBuilder
builder = SavedModelMetadataBuilder(export_path)
builder.set_numeric_metadata(
model.input.name.split(':')[0],
input_baselines=[train_data.median().values.tolist()],
index_feature_mapping=train_data.columns.tolist()
)
builder.save_metadata(export_path)
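# Illustrative check (editorial): the numeric baseline configured above is
# simply the per-feature median of the training data.
print(dict(zip(train_data.columns, train_data.median().values)))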
import datetime
MODEL = 'bike' + datetime.datetime.now().strftime("%d%m%Y%H%M%S")
# Create the model if it doesn't exist yet (you only need to run this once)
! gcloud ai-platform models create $MODEL --enable-logging --region=$REGION
# Each time you create a version the name should be unique
VERSION = 'v1'
# Create the version with gcloud
explain_method = 'integrated-gradients'
! gcloud beta ai-platform versions create $VERSION \
--model $MODEL \
--origin $export_path \
--runtime-version 2.1 \
--framework TENSORFLOW \
--python-version 3.7 \
--machine-type n1-standard-4 \
--explanation-method $explain_method \
--num-integral-steps 25 \
--region $REGION
# Make sure the model deployed correctly. State should be `READY` in the following log
! gcloud ai-platform versions describe $VERSION --model $MODEL --region $REGION
# Format data for prediction to your model
prediction_json = {model.input.name.split(':')[0]: test_data.iloc[0].values.tolist()}
remote_ig_model = explainable_ai_sdk.load_model_from_ai_platform(project=PROJECT_ID,
model=MODEL,
version=VERSION,
region=REGION)
ig_response = remote_ig_model.explain([prediction_json])
attr = ig_response[0].get_attribution()
predicted = round(attr.example_score, 2)
print('Predicted duration: ' + str(predicted) + ' minutes')
print('Actual duration: ' + str(test_labels.iloc[0]) + ' minutes')
ig_response[0].visualize_attributions()
# Prepare 10 test examples to your model for prediction
pred_batch = []
for i in range(10):
pred_batch.append({model.input.name.split(':')[0]: test_data.iloc[i].values.tolist()})
test_response = remote_ig_model.explain(pred_batch)
def sanity_check_explanations(example, mean_tgt_value=None, variance_tgt_value=None):
passed_test = 0
total_test = 1
# `attributions` is a dict where keys are the feature names
# and values are the feature attributions for each feature
attr = example.get_attribution()
baseline_score = attr.baseline_score
# sum_with_baseline = np.sum(attribution_vals) + baseline_score
predicted_val = attr.example_score
# Sanity check 1
# If the prediction at the input is (nearly) equal to the prediction at the
# baseline, attributions will be near zero; in that case use a different
# baseline (e.g. a random input or the training-set mean).
if abs(predicted_val - baseline_score) <= 0.05:
print('Warning: example score and baseline score are too close.')
print('You might not get attributions.')
else:
passed_test += 1
# Sanity check 2 (only for models using Integrated Gradient explanations)
# Ideally, the sum of the integrated gradients equals the difference between
# the predictions at the input and at the baseline. Any discrepancy between
# these two values is due to error in approximating the integral.
if explain_method == 'integrated-gradients':
total_test += 1
want_integral = predicted_val - baseline_score
got_integral = sum(attr.post_processed_attributions.values())
if abs(want_integral - got_integral) / abs(want_integral) > 0.05:
print('Warning: Integral approximation error exceeds 5%.')
print('Please try increasing the number of integrated gradient steps.')
else:
passed_test += 1
print(passed_test, ' out of ', total_test, ' sanity checks passed.')
for response in test_response:
sanity_check_explanations(response)
# This is the number of data points you'll send to the What-if Tool
WHAT_IF_TOOL_SIZE = 500
from witwidget.notebook.visualization import WitWidget, WitConfigBuilder
def create_list(ex_dict):
new_list = []
for i in feature_names:
new_list.append(ex_dict[i])
return new_list
def example_dict_to_input(example_dict):
return {'dense_input': create_list(example_dict)}
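# Note (editorial): the What-If Tool sends each example as a feature dict;
# adjust_example (used below) converts it into the {'dense_input': [...]}
# payload that the deployed Keras model expects.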
from collections import OrderedDict
wit_data = test_data.iloc[:WHAT_IF_TOOL_SIZE].copy()
wit_data['duration'] = test_labels[:WHAT_IF_TOOL_SIZE]
wit_data_dict = wit_data.to_dict(orient='records', into=OrderedDict)
config_builder = WitConfigBuilder(
wit_data_dict
).set_ai_platform_model(
PROJECT_ID,
MODEL,
VERSION,
adjust_example=example_dict_to_input
).set_target_feature('duration').set_model_type('regression')
WitWidget(config_builder)
# Delete model version resource
! gcloud ai-platform versions delete $VERSION --quiet --model $MODEL
# Delete model resource
! gcloud ai-platform models delete $MODEL --quiet
# Delete Cloud Storage objects that were created
! gsutil -m rm -r gs://$BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Baseline evaluation
Step2: That's pretty impressive for the baseline query. It beats our baseline cross_fields query but falls just short of the optimized one.
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import importlib
import os
import sys
from elasticsearch import Elasticsearch
from skopt.plots import plot_objective
# project library
sys.path.insert(0, os.path.abspath('..'))
import qopt
importlib.reload(qopt)
from qopt.notebooks import evaluate_mrr100_dev, optimize_query_mrr100
from qopt.optimize import Config
# use a local Elasticsearch or Cloud instance (https://cloud.elastic.co/)
es = Elasticsearch('http://localhost:9200')
# set the parallelization parameter `max_concurrent_searches` for the Rank Evaluation API calls
max_concurrent_searches = 10
# max_concurrent_searches = 30
index = 'msmarco-document'
template_id = 'best_fields'
_ = evaluate_mrr100_dev(es, max_concurrent_searches, index, template_id,
params={
'tie_breaker': 0.0,
'url|boost': 1.0,
'title|boost': 1.0,
'body|boost': 1.0,
})
%%time
_, _, final_params_best_fields, metadata_best_fields = optimize_query_mrr100(es, max_concurrent_searches, index, template_id,
config_space=Config.parse({
'method': 'bayesian',
'num_iterations': 100,
'num_initial_points': 40,
'space': {
'tie_breaker': { 'low': 0.0, 'high': 1.0 },
'url|boost': { 'low': 0.0, 'high': 10.0 },
'title|boost': { 'low': 0.0, 'high': 10.0 },
'body|boost': { 'low': 0.0, 'high': 10.0 },
},
}))
_ = plot_objective(metadata_best_fields, sample_source='result')
%%time
_ = evaluate_mrr100_dev(es, max_concurrent_searches, index, template_id, params=final_params_best_fields)
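# For reference, a hedged sketch (an assumption -- the real search template
# ships with the qopt project, not this notebook) of what a 'best_fields'
# multi_match template using these parameters might look like:
best_fields_template = {
    "query": {
        "multi_match": {
            "type": "best_fields",
            "query": "{{query_string}}",
            "tie_breaker": "{{tie_breaker}}",
            "fields": [
                "url^{{url|boost}}",
                "title^{{title|boost}}",
                "body^{{body|boost}}",
            ],
        }
    }
}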
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Session 2 Primer
Step3: Example call
|
<ASSISTANT_TASK:>
Python Code:
# ensure that plots are shown inline
%matplotlib inline
import numpy as np # <- efficient vector/matrix operations (similar to MATLAB)
# the next ones are not required here, but might become useful later on, check if they're installed
import matplotlib as plt # <- basic plotting
import seaborn as sns # <- fancy plotting
import pandas as pd # <- a powerful data analysis and manipulation library for Python
import sklearn # <- machine learning libray
import scipy # <- scientific computing in general
import sympy # <- symbolic calculations
def simulateTree(p0, nGens):
"""simulates a single tree from the Till/McCulloch model
inputs:
p0: probability that a single cell undergoes terminal differentiation (i.e. no more division)
nGens: number of generations to simulate
returns:
a list (one element per generation) of single cells present at that generation.
a single element is just an array of cells present at that time
(zeros for stem cells, 1s for differentiated cells).
"""
# cell state is either 0 (stem cell) or 1 (differentiated),
# which is the only thing we keep track of here
theGenerations = list()
theGenerations.append(np.array(0))
for g in range(nGens):
lastGen = theGenerations[-1]
# for each of the last generation, roll a dice whether it terminally diffs
newState = roll_the_dice(lastGen, p0)
#all the zeros divide, the 1's just stay
n0 = sum(newState==0) # beware: this is Python's internal sum(), not the one from numpy (which is loads faster)
n1 = sum(newState==1) # however, speed doesn't really matter here
nextGen = np.concatenate([np.repeat(0, 2*n0), np.repeat(1,n1)])
theGenerations.append(nextGen)
return theGenerations
def roll_the_dice(cellstate_array, p0):
"""decide if a cell goes from 0->1 (with probability p0);
does that for an entire vector of zeros and ones in parallel
"""
# lookup of transition probabilities indexed by current cell state;
# makes sure that as soon as cell_state==1 it won't change anymore
tmpP = np.array([p0, 1])
p = tmpP[cellstate_array]
r = np.random.rand(cellstate_array.size)
newGeneration = r<p
return newGeneration.astype(int)
# Example call
aSingleTree = simulateTree(p0=0.1, nGens=5)
print(aSingleTree) # just a list of numpy.arrays
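# A quick extension (an added sketch, not in the original): estimate the
# probability that the stem-cell line has died out by generation 10; for this
# binary branching process it approaches p0/(1-p0) (~0.67 here) as nGens grows.
n_trees = 1000
extinct = 0
for _ in range(n_trees):
    tree = simulateTree(p0=0.4, nGens=10)
    if np.sum(tree[-1] == 0) == 0:  # no stem cells left in the final generation
        extinct += 1
print('Estimated extinction probability: {:.2f} (theory: {:.2f})'.format(
    extinct / float(n_trees), 0.4 / 0.6))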
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Survival analysis
Step3: The survival function is just the complementary CDF.
Step4: Here's the CDF and SF.
Step5: And here's the hazard function.
Step6: Age at first marriage
Step7: We have to clean up a few variables.
Step8: And the extract the age at first marriage for people who are married, and the age at time of interview for people who are not.
Step10: The following function uses Kaplan-Meier to estimate the hazard function.
Step11: Here is the hazard function and corresponding survival function.
Step14: Quantifying uncertainty
Step15: The following plot shows the survival function based on the raw data and a 90% CI based on resampling.
Step16: The SF based on the raw data falls outside the 90% CI because the CI is based on weighted resampling, and the raw data is not. You can confirm that by replacing ResampleRowsWeighted with ResampleRows in ResampleSurvival.
Step20: The following is the code from survival.py that generates SFs broken down by decade of birth.
Step21: Here are the results for the combined data.
Step23: We can generate predictions by assuming that the hazard function of each generation will be the same as for the previous generation.
Step24: And here's what that looks like.
Step25: Remaining lifetime
Step26: Here's the expected remaining duration of a pregnancy as a function of the number of weeks elapsed. After week 36, the process becomes "memoryless".
Step27: And here's the median remaining time until first marriage as a function of age.
Step33: Exercises
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
%matplotlib inline
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
import numpy as np
import pandas as pd
import random
import thinkstats2
import thinkplot
import nsfg
preg = nsfg.ReadFemPreg()
complete = preg.query('outcome in [1, 3, 4]').prglngth
cdf = thinkstats2.Cdf(complete, label='cdf')
import survival
def MakeSurvivalFromCdf(cdf, label=''):
"""Makes a survival function based on a CDF.
cdf: Cdf
returns: SurvivalFunction
"""
ts = cdf.xs
ss = 1 - cdf.ps
return survival.SurvivalFunction(ts, ss, label)
sf = MakeSurvivalFromCdf(cdf, label='survival')
print(cdf[13])
print(sf[13])
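# Quick check (editorial addition): SF and CDF are complementary at week 13.
assert abs(sf[13] + cdf[13] - 1) < 1e-9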
thinkplot.Plot(sf)
thinkplot.Cdf(cdf, alpha=0.2)
thinkplot.Config(loc='center left')
hf = sf.MakeHazardFunction(label='hazard')
print(hf[39])
thinkplot.Plot(hf)
thinkplot.Config(ylim=[0, 0.75], loc='upper left')
resp6 = nsfg.ReadFemResp()
resp6.cmmarrhx.replace([9997, 9998, 9999], np.nan, inplace=True)
resp6['agemarry'] = (resp6.cmmarrhx - resp6.cmbirth) / 12.0
resp6['age'] = (resp6.cmintvw - resp6.cmbirth) / 12.0
complete = resp6[resp6.evrmarry==1].agemarry.dropna()
ongoing = resp6[resp6.evrmarry==0].age
from collections import Counter
def EstimateHazardFunction(complete, ongoing, label='', verbose=False):
"""Estimates the hazard function by Kaplan-Meier.
http://en.wikipedia.org/wiki/Kaplan%E2%80%93Meier_estimator
complete: list of complete lifetimes
ongoing: list of ongoing lifetimes
label: string
verbose: whether to display intermediate results
"""
if np.sum(np.isnan(complete)):
raise ValueError("complete contains NaNs")
if np.sum(np.isnan(ongoing)):
raise ValueError("ongoing contains NaNs")
hist_complete = Counter(complete)
hist_ongoing = Counter(ongoing)
ts = list(hist_complete | hist_ongoing)
ts.sort()
at_risk = len(complete) + len(ongoing)
lams = pd.Series(index=ts)
for t in ts:
ended = hist_complete[t]
censored = hist_ongoing[t]
lams[t] = ended / at_risk
if verbose:
print(t, at_risk, ended, censored, lams[t])
at_risk -= ended + censored
return survival.HazardFunction(lams, label=label)
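# A tiny worked example on toy data (an added illustration): three completed
# lifetimes and two censored (ongoing) ones, with intermediate results shown.
toy_hf = EstimateHazardFunction(np.array([1.0, 2.0, 2.0]),
                                np.array([1.5, 3.0]),
                                verbose=True)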
hf = EstimateHazardFunction(complete, ongoing)
thinkplot.Plot(hf)
thinkplot.Config(xlabel='Age (years)',
ylabel='Hazard')
sf = hf.MakeSurvival()
thinkplot.Plot(sf)
thinkplot.Config(xlabel='Age (years)',
ylabel='Prob unmarried',
ylim=[0, 1])
def EstimateMarriageSurvival(resp):
"""Estimates the survival curve.
resp: DataFrame of respondents
returns: pair of HazardFunction, SurvivalFunction
"""
# NOTE: Filling missing values would be better than dropping them.
complete = resp[resp.evrmarry == 1].agemarry.dropna()
ongoing = resp[resp.evrmarry == 0].age
hf = EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
return hf, sf
def ResampleSurvival(resp, iters=101):
"""Resamples respondents and estimates the survival function.
resp: DataFrame of respondents
iters: number of resamples
"""
_, sf = EstimateMarriageSurvival(resp)
thinkplot.Plot(sf)
low, high = resp.agemarry.min(), resp.agemarry.max()
ts = np.arange(low, high, 1/12.0)
ss_seq = []
for _ in range(iters):
sample = thinkstats2.ResampleRowsWeighted(resp)
_, sf = EstimateMarriageSurvival(sample)
ss_seq.append(sf.Probs(ts))
low, high = thinkstats2.PercentileRows(ss_seq, [5, 95])
thinkplot.FillBetween(ts, low, high, color='gray', label='90% CI')
ResampleSurvival(resp6)
thinkplot.Config(xlabel='Age (years)',
ylabel='Prob unmarried',
xlim=[12, 46],
ylim=[0, 1],
loc='upper right')
resp5 = survival.ReadFemResp1995()
resp6 = survival.ReadFemResp2002()
resp7 = survival.ReadFemResp2010()
resps = [resp5, resp6, resp7]
def AddLabelsByDecade(groups, **options):
"""Draws fake points in order to add labels to the legend.
groups: GroupBy object
"""
thinkplot.PrePlot(len(groups))
for name, _ in groups:
label = '%d0s' % name
thinkplot.Plot([15], [1], label=label, **options)
def EstimateMarriageSurvivalByDecade(groups, **options):
"""Groups respondents by decade and plots survival curves.
groups: GroupBy object
"""
thinkplot.PrePlot(len(groups))
for _, group in groups:
_, sf = EstimateMarriageSurvival(group)
thinkplot.Plot(sf, **options)
def PlotResampledByDecade(resps, iters=11, predict_flag=False, omit=None):
"""Plots survival curves for resampled data.
resps: list of DataFrames
iters: number of resamples to plot
predict_flag: whether to also plot predictions
"""
for i in range(iters):
samples = [thinkstats2.ResampleRowsWeighted(resp)
for resp in resps]
sample = pd.concat(samples, ignore_index=True)
groups = sample.groupby('decade')
if omit:
groups = [(name, group) for name, group in groups
if name not in omit]
# TODO: refactor this to collect resampled estimates and
# plot shaded areas
if i == 0:
AddLabelsByDecade(groups, alpha=0.7)
if predict_flag:
PlotPredictionsByDecade(groups, alpha=0.1)
EstimateMarriageSurvivalByDecade(groups, alpha=0.1)
else:
EstimateMarriageSurvivalByDecade(groups, alpha=0.2)
PlotResampledByDecade(resps)
thinkplot.Config(xlabel='Age (years)',
ylabel='Prob unmarried',
xlim=[13, 45],
ylim=[0, 1])
def PlotPredictionsByDecade(groups, **options):
"""Groups respondents by decade and plots survival curves.
groups: GroupBy object
"""
hfs = []
for _, group in groups:
hf, sf = EstimateMarriageSurvival(group)
hfs.append(hf)
thinkplot.PrePlot(len(hfs))
for i, hf in enumerate(hfs):
if i > 0:
hf.Extend(hfs[i-1])
sf = hf.MakeSurvival()
thinkplot.Plot(sf, **options)
PlotResampledByDecade(resps, predict_flag=True)
thinkplot.Config(xlabel='Age (years)',
ylabel='Prob unmarried',
xlim=[13, 45],
ylim=[0, 1])
preg = nsfg.ReadFemPreg()
complete = preg.query('outcome in [1, 3, 4]').prglngth
print('Number of complete pregnancies', len(complete))
ongoing = preg[preg.outcome == 6].prglngth
print('Number of ongoing pregnancies', len(ongoing))
hf = EstimateHazardFunction(complete, ongoing)
sf1 = hf.MakeSurvival()
rem_life1 = sf1.RemainingLifetime()
thinkplot.Plot(rem_life1)
thinkplot.Config(title='Remaining pregnancy length',
xlabel='Weeks',
ylabel='Mean remaining weeks')
hf, sf2 = EstimateMarriageSurvival(resp6)
func = lambda pmf: pmf.Percentile(50)
rem_life2 = sf2.RemainingLifetime(filler=np.inf, func=func)
thinkplot.Plot(rem_life2)
thinkplot.Config(title='Years until first marriage',
ylim=[0, 15],
xlim=[11, 31],
xlabel='Age (years)',
ylabel='Median remaining years')
def CleanData(resp):
"""Cleans respondent data.
resp: DataFrame
"""
resp.cmdivorcx.replace([9998, 9999], np.nan, inplace=True)
resp['notdivorced'] = resp.cmdivorcx.isnull().astype(int)
resp['duration'] = (resp.cmdivorcx - resp.cmmarrhx) / 12.0
resp['durationsofar'] = (resp.cmintvw - resp.cmmarrhx) / 12.0
month0 = pd.to_datetime('1899-12-15')
dates = [month0 + pd.DateOffset(months=cm)
for cm in resp.cmbirth]
resp['decade'] = (pd.DatetimeIndex(dates).year - 1900) // 10
CleanData(resp6)
married6 = resp6[resp6.evrmarry==1]
CleanData(resp7)
married7 = resp7[resp7.evrmarry==1]
# Solution
def ResampleDivorceCurve(resps):
"""Plots divorce curves based on resampled data.
resps: list of respondent DataFrames
"""
for _ in range(11):
samples = [thinkstats2.ResampleRowsWeighted(resp)
for resp in resps]
sample = pd.concat(samples, ignore_index=True)
PlotDivorceCurveByDecade(sample, color='#225EA8', alpha=0.1)
thinkplot.Show(xlabel='years',
axis=[0, 28, 0, 1])
# Solution
def ResampleDivorceCurveByDecade(resps):
"""Plots divorce curves for each birth cohort.
resps: list of respondent DataFrames
"""
for i in range(41):
samples = [thinkstats2.ResampleRowsWeighted(resp)
for resp in resps]
sample = pd.concat(samples, ignore_index=True)
groups = sample.groupby('decade')
if i == 0:
survival.AddLabelsByDecade(groups, alpha=0.7)
EstimateSurvivalByDecade(groups, alpha=0.1)
thinkplot.Config(xlabel='Years',
ylabel='Fraction undivorced',
axis=[0, 28, 0, 1])
# Solution
def EstimateSurvivalByDecade(groups, **options):
"""Groups respondents by decade and plots survival curves.
groups: GroupBy object
"""
thinkplot.PrePlot(len(groups))
for name, group in groups:
_, sf = EstimateSurvival(group)
thinkplot.Plot(sf, **options)
# Solution
def EstimateSurvival(resp):
"""Estimates the survival curve.
resp: DataFrame of respondents
returns: pair of HazardFunction, SurvivalFunction
"""
complete = resp[resp.notdivorced == 0].duration.dropna()
ongoing = resp[resp.notdivorced == 1].durationsofar.dropna()
hf = survival.EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
return hf, sf
# Solution
ResampleDivorceCurveByDecade([married6, married7])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: (1) We will fit the data contained within Fig. 3B. Plot this data and describe the relationship you see between Kx, Kd, and valency.
Step2: (2) First, to do so, we'll need a function that takes the model predictions, scales them to the units of the actual measurements, and finds the predictions for each condition. Define a scaling parameter and a function that takes it along with the other parameters to make predictions about the experiment.
Step3: (2) Now use scipy.optimize.least_squares to find the least squares solution.
Step4: (3) Using leave-one-out crossvalidation, does this model predict the data? Plot the measured vs. predicted data.
Step5: (4) Using bootstrap estimation, plot the confidence interval of the model along with the data points.
Step6: (6) While easier to perform, a local sensitivity analysis ignores codependency between the parameters. Do you anticipate your predictions of the parameter values will be more or less specified with a global analysis?
Step7: (7) Now, vary each parameter from the optimal solution, allowing the other parameters to vary. Was your prediction true? How might the other parameters be varying when Kd increases?
Step8: (8) At the same time as providing the number of multimerized receptors, the model also infers the quantities of other properties, such as the amount of ligand and receptor bound. Using the bootstrap estimates, plot the confidence in these other parameters. Are these more or less exactly specified than Rmulti? What can you say about which quantities will be most exactly predicted?
|
<ASSISTANT_TASK:>
Python Code:
% matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import binom
from scipy.optimize import brentq
np.seterr(over='raise')
def StoneMod(Rtot, Kd, v, Kx, L0):
'''
Returns the number of mutlivalent ligand bound to a cell with Rtot
receptors, granted each epitope of the ligand binds to the receptor
kind in question with dissociation constant Kd and cross-links with
other receptors with crosslinking constant Kx. All eq derived from Stone et al. (2001).
'''
v = np.int_(v)
# Mass balance for receptor species, to identify the amount of free receptor
diffFunAnon = lambda x: Rtot-x*(1+v*L0*(1/Kd)*(1+Kx*x)**(v-1))
# Check that there is a solution
if diffFunAnon(0) * diffFunAnon(Rtot) > 0:
raise RuntimeError("There is no solution with these parameters. Are your inputs correct?")
# Solve the mass balance for the equilibrium amount of free receptor
Req = brentq(diffFunAnon, 0, Rtot, disp=False)
# Calculate vieq from equation 1
vieq = L0*(1/Kd)*Req*(binom(v, np.arange(1, v + 1))) * np.power(Kx*Req, np.arange(v))
# Calculate L, according to equation 7
Lbound = np.sum(vieq)
# Calculate Rmulti from equation 5
Rmulti = np.sum(np.multiply(vieq[1:], np.arange(2, v + 1, dtype=np.float)))
# Calculate Rbound
Rbnd = np.sum(np.multiply(vieq, np.arange(1, v + 1, dtype=np.float)))
# Calculate numXlinks from equation 4
nXlink = np.sum(np.multiply(vieq[1:], np.arange(1, v, dtype=np.float)))
return (Lbound, Rbnd, Rmulti, nXlink)
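# Quick smoke test (an added example; parameter values are taken from the
# fits used later in this notebook):
Lb, Rb, Rm, nX = StoneMod(Rtot=24000, Kd=1.7E-6, v=4, Kx=3.15E-4, L0=1.0E-9)
print('Lbound: {:.3g}, Rbnd: {:.3g}, Rmulti: {:.3g}, nXlink: {:.3g}'.format(Lb, Rb, Rm, nX))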
data = np.loadtxt("./data/wk3-stone.csv", delimiter=",")
# Vector of the ligand concentrations, cell response (proportional to Rmulti), valencies
Xs, Ys, Vs = np.hsplit(data, 3)
Xs = np.squeeze(Xs)
Ys = np.squeeze(Ys)
Vs = np.squeeze(Vs)
plt.semilogx(Xs, Ys, '.');
plt.xlabel('Concentration');
plt.ylabel('CD3 (1/cell)');
XsSim = np.repeat(np.logspace(-11, -5), 3)
VsSim = np.tile(np.array([2, 3, 4]), 50)
def predict(Rtot, Kd, Kx, Vs, Ls, scale):
pred = np.zeros(Ls.shape)
for ii in range(Ls.size):
pred[ii] = StoneMod(Rtot, Kd, Vs[ii], Kx, Ls[ii])[2]
return pred * scale
Rtot = 24000
ss = predict(Rtot, 1.7E-6, 3.15E-4, VsSim, XsSim, 1.0)
plt.semilogx(XsSim, ss, '.');
plt.semilogx(Xs, Ys, '.');
plt.xlabel('Concentration');
plt.ylabel('CD3 (1/cell)');
Ypred = lambda x: predict(Rtot, x[0], x[1], Vs, Xs, x[2]) - Ys
from scipy.optimize import least_squares
sol = least_squares(Ypred, [1.7E-6, 3.15E-4, 1.0])
best_x = sol.x
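# Inspect the fitted values (an added convenience, not in the original answer):
print('Best-fit Kd: {:.3g}, Kx: {:.3g}, scale: {:.3g}'.format(*best_x))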
# Answer
ssePred = lambda x: np.sum(np.square(Ypred(x)))
a = np.logspace(-1, 1, num = 41)
b = np.stack((a, a, a))
for ii in range(b.shape[0]):
for jj in range(b.shape[1]):
temp = best_x.copy()
temp[ii] = temp[ii] * a[jj]
b[ii, jj] = ssePred(temp)
b = b / np.min(np.min(b))
plt.loglog(a, b[0, :]);
plt.loglog(a, b[1, :]);
plt.loglog(a, b[2, :]);
# Answer.
bglobal = np.stack((a, a, a))
for ii in range(bglobal.shape[0]):
for jj in range(bglobal.shape[1]):
temp = best_x.copy()
temp[ii] = temp[ii] * a[jj]
lb = np.array([-np.inf, -np.inf, -np.inf])
ub = -lb
lb[ii] = temp[ii] - 1.0E-12
ub[ii] = temp[ii] + 1.0E-12
bndtemp = (lb, ub)
x0 = [1.7E-6, 3.15E-4, 1.0]
x0[ii] = temp[ii]
bglobal[ii, jj] = least_squares(Ypred, x0, bounds = bndtemp).cost
bglobal = bglobal / np.min(np.min(bglobal))
for ii in range(3):
plt.loglog(a, bglobal[ii, :]);
# Answer
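# The original answer is not included here. Below is only a hedged sketch of
# one possible approach: bootstrap-resample the data, refit, and collect the
# inferred Lbound and Rbnd at a reference condition (v=4, L0=10 nM).
n_boot = 50
quants = np.zeros((n_boot, 2))
for ii in range(n_boot):
    idx = np.random.randint(0, Xs.size, Xs.size)
    resid = lambda x: predict(Rtot, x[0], x[1], Vs[idx], Xs[idx], x[2]) - Ys[idx]
    xfit = least_squares(resid, best_x, bounds=([1e-12, 1e-12, 1e-12], np.inf)).x
    Lb, Rb, _, _ = StoneMod(Rtot, xfit[0], 4, xfit[1], 1.0E-8)
    quants[ii, :] = (Lb, Rb)
print('Lbound 5th-95th percentile:', np.percentile(quants[:, 0], [5, 95]))
print('Rbnd 5th-95th percentile:', np.percentile(quants[:, 1], [5, 95]))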
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we'll use two interpacf methods on these simulated fluxes
Step2: Comparing with McQuillan, Aigrain & Mazeh (2013)
Step3: Now measure the peak in the autocorrelation function for each quarter's light curve
Step4: Compare with McQuillan+ 2013
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# Make flux time-series with random noise, and
# two periodic oscillations, one 70% the amplitude
# of the other:
np.random.seed(42)
n_points = 1000
primary_period = 2.5*np.pi
secondary_period = 1.3*np.pi
all_times = np.linspace(0, 6*np.pi, n_points)
all_fluxes = 10 + (0.1*np.random.randn(len(all_times)) +
np.sin(2*np.pi/primary_period * all_times) +
0.7*np.cos(2*np.pi/secondary_period * (all_times - 2.5)))
# Remove some fluxes, times from those data:
n_points_missing = 200 # This number is approximate
missing_indices = np.unique(np.random.randint(0, n_points,
size=n_points_missing))
mask = list(set(np.arange(len(all_times))).difference(set(missing_indices)))
times_incomplete = all_times[mask]
fluxes_incomplete = all_fluxes[mask]
# Plot these fluxes before and after data are removed:
fig, ax = plt.subplots(1, 2, figsize=(14, 5))
ax[0].plot(all_times, all_fluxes, '.')
ax[0].set(title='All fluxes (N={0})'.format(len(all_fluxes)))
ax[1].plot(times_incomplete, fluxes_incomplete, '.')
ax[1].set(title='With fluxes missing (N={0})'.format(len(fluxes_incomplete)))
plt.show()
from interpacf import interpolated_acf, dominant_period
# Need zero-mean fluxes:
fluxes_incomplete -= np.mean(fluxes_incomplete)
# Compute autocorrelation function
lag, acf = interpolated_acf(times_incomplete, fluxes_incomplete)
# Find dominant period in autocorrelation function
detected_period = dominant_period(lag, acf, plot=True)
print("Actual dominant period: {0:.3f}\nDetected dominant period: "
"{1:.3f}\nDifference: {2:.3f}%"
.format(primary_period, detected_period,
(primary_period - detected_period)/primary_period))
import numpy as np
import kplr
client = kplr.API()
# Find the target KOI.
koi = client.koi(3.01)
# Get a list of light curve datasets.
lcs = koi.get_light_curves(short_cadence=False)
# Loop over the datasets and read in the data.
time, flux, ferr, quality = [], [], [], []
for lc in lcs[1:]:
with lc.open() as f:
# The lightcurve data are in the first FITS HDU.
hdu_data = f[1].data
time.append(hdu_data["time"])
flux.append(hdu_data["sap_flux"])
ferr.append(hdu_data["sap_flux_err"])
quality.append(hdu_data["sap_quality"])
time = np.array(time)
# Median normalize each quarter of observations
flux = np.array([f/np.nanmedian(f) - 1 for f in flux])
%matplotlib inline
periods = []
for i, t, f in zip(range(len(time)), time, flux):
lag, acf = interpolated_acf(t[~np.isnan(f)], f[~np.isnan(f)])
period = dominant_period(lag, acf)
periods.append(period)
print("HAT-P-11 period in Q{0}: {1} d".format(i, period))
print("Median period (interpacf): {0};\n"
"Period McQuillan+ 2013: 29.472"
.format(np.median(periods)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Affine Layer
Step2: Batch Normalization Layer
Step3: Dropout Layer
Step4: PReLU (Parametric Rectified Linear Unit) Layer
Step5: ReLU (Rectified Linear Unit) Layer
Step6: Sigmoid Layer
Step7: Softmax Loss Layer
Step8: SVM Loss Layer
Step9: Tanh Layer
Step10: Multilayer Fully Connected Network with Augmentations
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import numpy as np
import LearnyMcLearnface as lml
affine = lml.layers.AffineLayer(30, 10, 1e-2)
test_input = np.random.randn(50, 30)
dout = np.random.randn(50, 10)
_ = affine.forward(test_input)
dx_num = lml.utils.numerical_gradient_layer(lambda x : affine.forward(x, affine.W, affine.b), test_input, dout)
dW_num = lml.utils.numerical_gradient_layer(lambda w : affine.forward(test_input, w, affine.b), affine.W, dout)
db_num = lml.utils.numerical_gradient_layer(lambda b : affine.forward(test_input, affine.W, b), affine.b, dout)
dx = affine.backward(dout)
print('Affine dx error:', np.max(lml.utils.relative_error(dx, dx_num)))
print('Affine dW error:', np.max(lml.utils.relative_error(affine.dW, dW_num)))
print('Affine db error:', np.max(lml.utils.relative_error(affine.db, db_num)))
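# Rule of thumb (editorial note): with float64 inputs, relative errors below
# roughly 1e-7 indicate a correct analytic gradient; errors around 1e-2 or
# larger usually point to a bug in the backward pass.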
batchnorm = lml.layers.BatchnormLayer(10, 0.9)
test_input = np.random.randn(20, 10)
dout = np.random.randn(20, 10)
_ = batchnorm.forward_train(test_input)
dx_num = lml.utils.numerical_gradient_layer(lambda x : batchnorm.forward_train(x), test_input, dout)
dx = batchnorm.backward(dout)
print('Batchnorm dx error:', np.max(lml.utils.relative_error(dx, dx_num)))
dropout = lml.layers.DropoutLayer(10, 0.6, seed=5684)
test_input = np.random.randn(3, 10)
dout = np.random.randn(3, 10)
_ = dropout.forward_train(test_input)
dx_num = lml.utils.numerical_gradient_layer(lambda x : dropout.forward_train(x), test_input, dout)
dx = dropout.backward(dout)
print('Dropout dx error:', np.max(lml.utils.relative_error(dx, dx_num)))
prelu = lml.layers.PReLULayer(10)
test_input = np.random.randn(50, 10)
dout = np.random.randn(50, 10)
_ = prelu.forward(test_input)
dx_num = lml.utils.numerical_gradient_layer(lambda x : prelu.forward(x, prelu.W), test_input, dout)
dW_num = lml.utils.numerical_gradient_layer(lambda w : prelu.forward(test_input, w), prelu.W, dout)
dx = prelu.backward(dout)
print('PReLU dx error:', np.max(lml.utils.relative_error(dx, dx_num)))
print('PReLU dW error:', np.max(lml.utils.relative_error(prelu.dW, dW_num)))
relu = lml.layers.ReLULayer(10)
test_input = np.random.randn(50, 10)
dout = np.random.randn(50, 10)
_ = relu.forward(test_input)
dx_num = lml.utils.numerical_gradient_layer(lambda x : relu.forward(x), test_input, dout)
dx = relu.backward(dout)
print('ReLU dx error:', np.max(lml.utils.relative_error(dx, dx_num)))
sigmoid = lml.layers.SigmoidLayer(10)
test_input = np.random.randn(50, 10)
dout = np.random.randn(50, 10)
_ = sigmoid.forward(test_input)
dx_num = lml.utils.numerical_gradient_layer(lambda x : sigmoid.forward(x), test_input, dout)
dx = sigmoid.backward(dout)
print('Sigmoid dx error:', np.max(lml.utils.relative_error(dx, dx_num)))
softmax = lml.layers.SoftmaxLossLayer(10)
test_scores = np.random.randn(50, 10)
test_classes = np.random.randint(1, 10, 50)
_, dx = softmax.loss(test_scores, test_classes)
dx_num = lml.utils.numerical_gradient(lambda x : softmax.loss(x, test_classes)[0], test_scores)
print('Softmax backprop error:', np.max(lml.utils.relative_error(dx, dx_num)))
svm = lml.layers.SVMLossLayer(10)
test_scores = np.random.randn(50, 10)
test_classes = np.random.randint(1, 10, 50)
_, dx = svm.loss(test_scores, test_classes)
dx_num = lml.utils.numerical_gradient(lambda x : svm.loss(x, test_classes)[0], test_scores)
print('SVM backprop error:', np.max(lml.utils.relative_error(dx, dx_num)))
opts = {
'input_dim' : 10,
'data_type' : np.float64
}
nn = lml.NeuralNetwork(opts)
nn.add_layer('Affine', {'neurons':10, 'weight_scale':5e-2})
nn.add_layer('ReLU', {})
nn.add_layer('Affine', {'neurons':10, 'weight_scale':5e-2})
nn.add_layer('SoftmaxLoss', {})
test_scores = np.random.randn(20, 10)
test_classes = np.random.randint(1, 10, 20)
loss, dx = nn.backward(test_scores, test_classes)
print('With regularization off:')
f = lambda _: nn.backward(test_scores, test_classes)[0]
d_b1_num = lml.utils.numerical_gradient(f, nn.layers[0].b, accuracy=1e-8)
d_W1_num = lml.utils.numerical_gradient(f, nn.layers[0].W, accuracy=1e-8)
print('Weight 1 error:', np.max(lml.utils.relative_error(nn.layers[0].dW, d_W1_num)))
print('Bias 1 error:', np.max(lml.utils.relative_error(nn.layers[0].db, d_b1_num)))
d_b2_num = lml.utils.numerical_gradient(f, nn.layers[2].b, accuracy=1e-8)
d_W2_num = lml.utils.numerical_gradient(f, nn.layers[2].W, accuracy=1e-8)
print('Weight 2 error:', np.max(lml.utils.relative_error(nn.layers[2].dW, d_W2_num)))
print('Bias 2 error:', np.max(lml.utils.relative_error(nn.layers[2].db, d_b2_num)))
print('With regularization at lambda = 1.0:')
f = lambda _: nn.backward(test_scores, test_classes, reg_param=1.0)[0]
d_b1_num = lml.utils.numerical_gradient(f, nn.layers[0].b, accuracy=1e-8)
d_W1_num = lml.utils.numerical_gradient(f, nn.layers[0].W, accuracy=1e-8)
print('Weight 1 error:', np.max(lml.utils.relative_error(nn.layers[0].dW, d_W1_num)))
print('Bias 1 error:', np.max(lml.utils.relative_error(nn.layers[0].db, d_b1_num)))
d_b2_num = lml.utils.numerical_gradient(f, nn.layers[2].b, accuracy=1e-8)
d_W2_num = lml.utils.numerical_gradient(f, nn.layers[2].W, accuracy=1e-8)
print('Weight 2 error:', np.max(lml.utils.relative_error(nn.layers[2].dW, d_W2_num)))
print('Bias 2 error:', np.max(lml.utils.relative_error(nn.layers[2].db, d_b2_num)))
opts = {
'input_dim' : 10,
'data_type' : np.float64,
'init_scheme' : 'xavier'
}
nn = lml.NeuralNetwork(opts)
nn.add_layer('Affine', {'neurons':10})
nn.add_layer('Batchnorm', {'decay':0.9})
nn.add_layer('PReLU', {})
nn.add_layer('Dropout', {'dropout_param':0.85, 'seed':5684})
nn.add_layer('Affine', {'neurons':10})
nn.add_layer('Batchnorm', {'decay':0.7})
nn.add_layer('PReLU', {})
nn.add_layer('Dropout', {'dropout_param':0.90, 'seed':5684})
nn.add_layer('Affine', {'neurons':10})
nn.add_layer('Batchnorm', {'decay':0.8})
nn.add_layer('PReLU', {})
nn.add_layer('Dropout', {'dropout_param':0.95, 'seed':5684})
nn.add_layer('SoftmaxLoss', {})
test_scores = np.random.randn(20, 10)
test_classes = np.random.randint(1, 10, 20)
loss, dx = nn.backward(test_scores, test_classes)
f = lambda _: nn.backward(test_scores, test_classes, reg_param=0.7)[0]
d_b1_num = lml.utils.numerical_gradient(f, nn.layers[0].b, accuracy=1e-8)
d_W1_num = lml.utils.numerical_gradient(f, nn.layers[0].W, accuracy=1e-8)
print('Weight 1 error:', np.max(lml.utils.relative_error(nn.layers[0].dW, d_W1_num)))
print('Bias 1 error:', np.max(lml.utils.relative_error(nn.layers[0].db, d_b1_num)))
d_gamma1_num = lml.utils.numerical_gradient(f, nn.layers[1].gamma, accuracy=1e-8)
d_beta1_num = lml.utils.numerical_gradient(f, nn.layers[1].beta, accuracy=1e-8)
print('Gamma 1 error:', np.max(lml.utils.relative_error(nn.layers[1].dgamma, d_gamma1_num)))
print('Beta 1 error:', np.max(lml.utils.relative_error(nn.layers[1].dbeta, d_beta1_num)))
d_r1_num = lml.utils.numerical_gradient(f, nn.layers[2].W, accuracy=1e-8)
print('Rectifier 1 error:', np.max(lml.utils.relative_error(nn.layers[2].dW, d_r1_num)))
d_b1_num = lml.utils.numerical_gradient(f, nn.layers[4].b, accuracy=1e-8)
d_W1_num = lml.utils.numerical_gradient(f, nn.layers[4].W, accuracy=1e-8)
print('Weight 2 error:', np.max(lml.utils.relative_error(nn.layers[4].dW, d_W1_num)))
print('Bias 2 error:', np.max(lml.utils.relative_error(nn.layers[4].db, d_b1_num)))
d_gamma2_num = lml.utils.numerical_gradient(f, nn.layers[5].gamma, accuracy=1e-8)
d_beta2_num = lml.utils.numerical_gradient(f, nn.layers[5].beta, accuracy=1e-8)
print('Gamma 2 error:', np.max(lml.utils.relative_error(nn.layers[5].dgamma, d_gamma2_num)))
print('Beta 2 error:', np.max(lml.utils.relative_error(nn.layers[5].dbeta, d_beta2_num)))
d_r2_num = lml.utils.numerical_gradient(f, nn.layers[6].W, accuracy=1e-8)
print('Rectifier 2 error:', np.max(lml.utils.relative_error(nn.layers[6].dW, d_r2_num)))
d_b1_num = lml.utils.numerical_gradient(f, nn.layers[8].b, accuracy=1e-8)
d_W1_num = lml.utils.numerical_gradient(f, nn.layers[8].W, accuracy=1e-8)
print('Weight 3 error:', np.max(lml.utils.relative_error(nn.layers[8].dW, d_W1_num)))
print('Bias 3 error:', np.max(lml.utils.relative_error(nn.layers[8].db, d_b1_num)))
d_gamma3_num = lml.utils.numerical_gradient(f, nn.layers[9].gamma, accuracy=1e-8)
d_beta3_num = lml.utils.numerical_gradient(f, nn.layers[9].beta, accuracy=1e-8)
print('Gamma 3 error:', np.max(lml.utils.relative_error(nn.layers[9].dgamma, d_gamma3_num)))
print('Beta 3 error:', np.max(lml.utils.relative_error(nn.layers[9].dbeta, d_beta3_num)))
d_r3_num = lml.utils.numerical_gradient(f, nn.layers[10].W, accuracy=1e-8)
print('Rectifier 3 error:', np.max(lml.utils.relative_error(nn.layers[10].dW, d_r3_num)))
<END_TASK>
|