| text_prompt | code_prompt |
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now our data is in the format we want.
Step8: Permutation Test
Step9: Create data
Step10: Create PermTest instance and format data
Step11: Calculate difference of means
Step12: Create simulated distribution of test statistics (differences of means)
Step13: Visualize simulated distribution
Step14: Obtain p-value for test
Step16: Chi Squared Test
Step17: Create data
Step18: Create instance of ChiSquaredTest
Step19: Get pvalue
Step20: View bootstrapped distribution
|
<ASSISTANT_TASK:>
Python Code:
# imports used throughout this notebook
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# define the font styles
title_font = fm.FontProperties(family='serif', style='normal', size=19, weight='normal', stretch='normal')
label_font = fm.FontProperties(family='serif', style='normal', size=16, weight='normal', stretch='normal')
ticks_font = fm.FontProperties(family='serif', style='normal', size=10, weight='normal', stretch='normal')
annotation_font = fm.FontProperties(family='serif', style='normal', size=10, weight='normal', stretch='normal')
axis_bgcolor = 'white'
# make a re-usable function to draw nice bar charts
def plot_hist(data, title='', xlabel='Test Statistic Values', ylabel='Frequency', color='red'):
if type(data) == list:
data = pd.Series(data)
    ax = data.hist(figsize=[9, 6], alpha=0.5,
                   color=color, edgecolor='k', grid=False, rwidth=.9)
    # ax.set_xticklabels(data.index, rotation=45, rotation_mode='anchor', ha='right')
    # (disabled: index-based tick labels only make sense for categorical bar charts)
ax.yaxis.grid(True)
#for label in ax.get_yticklabels():
# label.set_fontproperties(ticks_font)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.grid(False)
ax.set_facecolor(axis_bgcolor)
ax.set_title(title, fontproperties=title_font)
ax.set_xlabel(xlabel, fontproperties=label_font)
ax.set_ylabel(ylabel, fontproperties=label_font)
plt.show()
# Example usage on election-simulation data. Assumes `dem_electoral` (an array of
# simulated Democratic electoral-vote totals) and the font kwargs dict `hfont`
# were defined in an earlier cell; `fig` is created here so the cell stands alone.
fig = plt.figure(figsize=[9, 6])
ax2 = fig.add_subplot(2,1,2)
ax2.hist(dem_electoral, 22, alpha=0.7, label='Democrat',rwidth=.9, color='#0099ff')
ax2.axvline(x=269, c='k', linewidth=7)
ax2.set_title("Histogram of Electoral Votes: Democrats", fontsize=20, **hfont)
ax2.grid(False)
ax2.set_xlabel('Number of Electoral Votes', fontsize=15)
ax2.set_ylabel('Frequency', fontsize=14)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.set_xlim(200,335)
# Standalone difference-of-means test statistic (superseded by PermTest.diff_means below)
def diff_means3(data, response, treatment):
trt1 = data[treatment].cat.categories[0]
trt2 = data[treatment].cat.categories[1]
mean_diff = data[response][data[treatment]==trt1].mean() - data[response][data[treatment]==trt2].mean()
return mean_diff
class PermTest(object):
def __init__(self, data):
self.data = data
self.formatted_data = None
def format_data(self, response1=None, response2=None):
        """
        Converts data from a 2-column format (where each column refers to a treatment type) to
        a 2-column format where response is one column and treatment is the other.
        This is done for both efficiency of storage and test runtime.
        :param response1, response2: column names to use; defaults to the first two columns
        :return: None; stores a pandas DataFrame in (response, treatment) format
        """
# create new treatment Series
        if (response1 is None) and (response2 is None):
# No columns specified, so default to first 2
nrow, ncol = self.data.iloc[:,[0,1]].shape
treatment_column = pd.Series([self.data.iloc[:,[0,1]].columns.values[i]
for i in range(ncol)
for j in range(nrow)], dtype='category')
response_column = pd.concat([self.data.iloc[:,0],
self.data.iloc[:,1]]).tolist()
else:
nrow, ncol = self.data.loc[:,[response1,response2]].shape
treatment_column = pd.Series([self.data.loc[:,[response1,response2]].columns.values[i]
for i in range(ncol)
for j in range(nrow)], dtype='category')
response_column = pd.concat([self.data.loc[:,response1],
self.data.loc[:,response2]]).tolist()
self.formatted_data = pd.DataFrame({"response": response_column,
"treatment": treatment_column})
def diff_means(self, response="response", treatment="treatment", data=None):
if data is None:
trt1 = self.formatted_data[treatment].cat.categories[0]
trt2 = self.formatted_data[treatment].cat.categories[1]
mean_diff = self.formatted_data[response][self.formatted_data[treatment]==trt1].mean() \
- self.formatted_data[response][self.formatted_data[treatment]==trt2].mean()
return mean_diff
else:
trt1 = data[treatment].cat.categories[0]
trt2 = data[treatment].cat.categories[1]
mean_diff = data[response][data[treatment]==trt1].mean() \
- data[response][data[treatment]==trt2].mean()
return mean_diff
def simPermDsn(self, response="response", treatment="treatment", test="mean", k=100):
        """
        Creates the permutation distribution of the test statistic for our data.
        :param response: name of the response column
        :param treatment: name of the treatment column
        :param test: which test statistic to use ("mean" = difference of means)
        :param k: number of simulated permutations
        :return: list of simulated test statistics
        """
        df = self.formatted_data.copy()  # work on a copy so the original labels stay intact
testStatistics = []
if test == "mean":
for simulation in range(k):
df[treatment] = pd.Series(np.random.permutation(self.formatted_data[treatment]),
dtype='category')
testStatistics.append(self.diff_means(response=response, treatment=treatment, data=df))
return testStatistics
def pvalue(self, response="response", treatment="treatment", test="mean", k=100):
        """Returns the (two-sided) p-value for the test."""
if test == "mean":
current_stat = self.diff_means(response=response, treatment=treatment)
simulated_stats = self.simPermDsn(response=response, treatment=treatment, test=test, k=k)
simulated_stats = np.array(simulated_stats)
p_value = 1.0 * sum(abs(simulated_stats) >= abs(current_stat))/k
return p_value
def plot_dsn(self, response="response", treatment="treatment", test="mean", k=100):
        """Plots the simulated distribution of test statistics."""
simulated_stats = self.simPermDsn(response=response, treatment=treatment, test=test, k=k)
tit = "Permutated Test Statistic Distribution for k={} Simulations".format(k)
plot_hist(simulated_stats, title=tit, xlabel="Test Statistic Value", ylabel="Frequency")
data = pd.DataFrame({"treatment1":[1.5, 2, 5, 0.5, 4, 6.6, 4.5, 3, 4, 2.5],
"treatment2":[1, 5, 4, 3, 5, 6, 4, 3, 4.5, 3.2]})
data
pt = PermTest(data)
pt.format_data()
pt.formatted_data
print(pt.diff_means('response', 'treatment', data=None))
testsimdata = pt.simPermDsn(k=1000)
pt.plot_dsn(k=1000)
print(pt.pvalue(k=1000))  # obtain the p-value for the test
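# As a rough sanity check (not an exact equivalence), the permutation p-value
# should land in the same ballpark as a classical two-sample t-test on the same
# columns; a minimal comparison using SciPy:
from scipy import stats
t_stat, t_pval = stats.ttest_ind(data['treatment1'], data['treatment2'])
print(t_pval)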
chi_squared_values = []
for i in range(100):
nums = np.random.random((32561,))
    ex = 32561 / 2.0  # expected count under a fair 50/50 split (float: 32561 is odd)
nums[nums < 0.5] = 0
nums[nums >= 0.5] = 1
male = sum(nums==0)
female = 32561-male
male_diff = (male - ex)**2 / ex
female_diff = (female - ex)**2 / ex
chi_squared_values.append(male_diff + female_diff)
plt.hist(chi_squared_values)
plt.show()
# return error if unequal lengths
def chi_squared_statistic(o,e):
o = np.array(o)
e = np.array(e)
return np.sum(1.0*(o - e)**2 / e)
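# Quick sanity check of the statistic with made-up counts (hypothetical values):
# (10-15)**2/15 + (20-15)**2/15 = 25/15 + 25/15, i.e. roughly 3.333
print(chi_squared_statistic([10, 20], [15, 15]))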
class ChiSquaredTest():
def __init__(self, observed=None, expected=None):
self.observed = observed
self.expected = expected
def plot_dsn(self, k=100):
simulated_stats = self.simPermDsn(k=k)
tit = "Chi-Squared Test Statistic Distribution for k={} Simulations".format(k)
plot_hist(simulated_stats, title=tit, xlabel="Test Statistic Value", ylabel="Frequency")
def testStat(self, input_observed=None, input_expected=None):
        if input_observed is not None or input_expected is not None:
chisq_teststat = chi_squared_statistic(input_observed, input_expected)
else:
chisq_teststat = chi_squared_statistic(self.observed, self.expected)
return chisq_teststat
def simPermDsn(self, input_observed=None, input_expected=None, k=100):
        if input_observed is not None or input_expected is not None:
test_stat = self.testStat(input_observed, input_expected)
n = len(input_observed)
else:
test_stat = self.testStat()
n = len(self.observed)
# create sampling distribution
chisqrd_vals = []
for i in range(k):
values = np.random.random((n,))
ex = 1.0*n/2
values[values<.5]=0
values[values>=.5]=1
diff1 = chi_squared_statistic(sum(values==0),ex)
diff2 = chi_squared_statistic(sum(values==1),ex)
chisqrd_vals.append(diff1+diff2)
return chisqrd_vals
def pvalue(self, input_observed=None, input_expected=None):
current_stat = self.testStat(input_observed, input_expected)
        simulated_stats = np.array(self.simPermDsn(input_observed, input_expected))
        # p-value = proportion of simulated test stats at least as extreme as ours
        p_value = 1.0 * sum(simulated_stats >= current_stat) / len(simulated_stats)
return p_value
obs = [26816, 3624, 1039, 311, 771]
ex = [26146.5, 3939.9, 1044.3, 310.5, 1119.8]
print(sum(obs))
print(sum(ex))
obs1 = [1,2,3,4,5]
ex1 = [1,2,2,4,5]
chisq = ChiSquaredTest(obs, ex)
chisq.pvalue(obs1,ex1)
chisq.plot_dsn(k=250)
# To Implement
# Levene's Test for Unequal Variances
# Mann-Whitney Test http://www.stat.wisc.edu/~st571-1/Fall2005/lec18-21.1.pdf
# t-test
# ANOVA http://www.marsja.se/four-ways-to-conduct-one-way-anovas-using-python/
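# Until the tests above are implemented here, SciPy already ships reference
# versions; a minimal sketch on the earlier `data` columns (assumes `data` from
# the permutation-test example is still in scope):
from scipy import stats
a, b = data['treatment1'], data['treatment2']
print(stats.levene(a, b))                                 # Levene's test for unequal variances
print(stats.mannwhitneyu(a, b, alternative='two-sided'))  # Mann-Whitney U test
print(stats.f_oneway(a, b))                               # one-way ANOVA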
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Project 3D electrodes to a 2D snapshot
Step3: Manually creating 2D electrode positions
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Christopher Holdgraf <choldgraf@berkeley.edu>
#
# License: BSD (3-clause)
from scipy.io import loadmat
import numpy as np
from mayavi import mlab
from matplotlib import pyplot as plt
from os import path as op
import mne
from mne.viz import ClickableImage # noqa
from mne.viz import plot_alignment, snapshot_brain_montage
print(__doc__)
subjects_dir = mne.datasets.sample.data_path() + '/subjects'
path_data = mne.datasets.misc.data_path() + '/ecog/sample_ecog.mat'
# We've already clicked and exported
layout_path = op.join(op.dirname(mne.__file__), 'data', 'image')
layout_name = 'custom_layout.lout'
mat = loadmat(path_data)
ch_names = mat['ch_names'].tolist()
elec = mat['elec']
dig_ch_pos = dict(zip(ch_names, elec))
mon = mne.channels.DigMontage(dig_ch_pos=dig_ch_pos)
info = mne.create_info(ch_names, 1000., 'ecog', montage=mon)
print('Created %s channel positions' % len(ch_names))
fig = plot_alignment(info, subject='sample', subjects_dir=subjects_dir,
surfaces=['pial'], meg=False)
mlab.view(200, 70)
xy, im = snapshot_brain_montage(fig, mon)
# Convert from a dictionary to array to plot
xy_pts = np.vstack([xy[ch] for ch in info['ch_names']])  # list, not generator, for np.vstack
# Define an arbitrary "activity" pattern for viz
activity = np.linspace(100, 200, xy_pts.shape[0])
# This allows us to use matplotlib to create arbitrary 2d scatterplots
fig2, ax = plt.subplots(figsize=(10, 10))
ax.imshow(im)
ax.scatter(*xy_pts.T, c=activity, s=200, cmap='coolwarm')
ax.set_axis_off()
# fig2.savefig('./brain.png', bbox_inches='tight') # For ClickableImage
# This code opens the image so you can click on it. Commented out
# because we've stored the clicks as a layout file already.
# # The click coordinates are stored as a list of tuples
# im = plt.imread('./brain.png')
# click = ClickableImage(im)
# click.plot_clicks()
# # Generate a layout from our clicks and normalize by the image
# print('Generating and saving layout...')
# lt = click.to_layout()
# lt.save(op.join(layout_path, layout_name)) # To save if we want
# # We've already got the layout, load it
lt = mne.channels.read_layout(layout_name, path=layout_path, scale=False)
# Layout positions are normalized to [0, 1] with the origin at the bottom-left,
# while image coordinates put the origin at the top-left, so scale by the image
# size and flip the y-position.
x = lt.pos[:, 0] * float(im.shape[1])
y = (1 - lt.pos[:, 1]) * float(im.shape[0])
fig, ax = plt.subplots()
ax.imshow(im)
ax.scatter(x, y, s=120, color='r')
plt.autoscale(tight=True)
ax.set_axis_off()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 1
Step2: (1b) Element-wise multiplication
Step3: (1c) Dot product
Step4: (1d) Matrix multiplication
Step5: Part 2
Step6: (2b) Element-wise multiplication and dot product
Step7: (2c) Matrix math
Step8: Part 3
Step9: (3b) Combining ndarray objects
Step10: (3c) PySpark's DenseVector
Step11: Part 4
Step12: (4b) lambda fewer steps than def
Step13: (4c) Lambda expression arguments
Step14: (4d) Restrictions on lambda expressions
Step18: (4e) Functional programming
Step19: (4f) Composability
Step20: Part 5
|
<ASSISTANT_TASK:>
Python Code:
labVersion = 'cs190_week1_v_1_2'
# TODO: Replace <FILL IN> with appropriate code
# Manually calculate your answer and represent the vector as a list of integers values.
# For example, [2, 4, 8].
x = [3, -6, 0]
y = [4, 8, 16]
# TEST Scalar multiplication: vectors (1a)
# Import test library
from test_helper import Test
Test.assertEqualsHashed(x, 'e460f5b87531a2b60e0f55c31b2e49914f779981',
'incorrect value for vector x')
Test.assertEqualsHashed(y, 'e2d37ff11427dbac7f833a5a7039c0de5a740b1e',
'incorrect value for vector y')
# TODO: Replace <FILL IN> with appropriate code
# Manually calculate your answer and represent the vector as a list of integers values.
z = [4, 10, 18]
# TEST Element-wise multiplication: vectors (1b)
Test.assertEqualsHashed(z, '4b5fe28ee2d274d7e0378bf993e28400f66205c2',
'incorrect value for vector z')
# TODO: Replace <FILL IN> with appropriate code
# Manually calculate your answer and set the variables to their appropriate integer values.
c1 = -11
c2 = 26
# TEST Dot product (1c)
Test.assertEqualsHashed(c1, '8d7a9046b6a6e21d66409ad0849d6ab8aa51007c', 'incorrect value for c1')
Test.assertEqualsHashed(c2, '887309d048beef83ad3eabf2a79a64a389ab1c9f', 'incorrect value for c2')
# TODO: Replace <FILL IN> with appropriate code
# Represent matrices as lists within lists. For example, [[1,2,3], [4,5,6]] represents a matrix with
# two rows and three columns. Use integer values.
X = [[22, 28], [49, 64]]
Y = [[1, 2, 3], [2, 4, 6], [3, 6, 9]]
# TEST Matrix multiplication (1d)
Test.assertEqualsHashed(X, 'c2ada2598d8a499e5dfb66f27a24f444483cba13',
'incorrect value for matrix X')
Test.assertEqualsHashed(Y, 'f985daf651531b7d776523836f3068d4c12e4519',
'incorrect value for matrix Y')
# It is convention to import NumPy with the alias np
import numpy as np
# TODO: Replace <FILL IN> with appropriate code
# Create a numpy array with the values 1, 2, 3
simpleArray = np.array([1, 2, 3])
# Perform the scalar product of 5 and the numpy array
timesFive = 5*simpleArray
print simpleArray
print timesFive
# TEST Scalar multiplication (2a)
Test.assertTrue(np.all(timesFive == [5, 10, 15]), 'incorrect value for timesFive')
# TODO: Replace <FILL IN> with appropriate code
# Create a ndarray based on a range and step size.
u = np.arange(0, 5, .5)
v = np.arange(5, 10, .5)
elementWise = u*v
dotProduct = np.dot(u, v)
print 'u: {0}'.format(u)
print 'v: {0}'.format(v)
print '\nelementWise\n{0}'.format(elementWise)
print '\ndotProduct\n{0}'.format(dotProduct)
# TEST Element-wise multiplication and dot product (2b)
Test.assertTrue(np.all(elementWise == [ 0., 2.75, 6., 9.75, 14., 18.75, 24., 29.75, 36., 42.75]),
'incorrect value for elementWise')
Test.assertEquals(dotProduct, 183.75, 'incorrect value for dotProduct')
# TODO: Replace <FILL IN> with appropriate code
from numpy.linalg import inv
A = np.matrix([[1,2,3,4],[5,6,7,8]])
print 'A:\n{0}'.format(A)
# Print A transpose
print '\nA transpose:\n{0}'.format(A.T)
# Multiply A by A transpose
AAt = A * A.T
print '\nAAt:\n{0}'.format(AAt)
# Invert AAt with np.linalg.inv()
AAtInv = inv(AAt)
print '\nAAtInv:\n{0}'.format(AAtInv)
# Show inverse times matrix equals identity
# We round due to numerical precision
print '\nAAtInv * AAt:\n{0}'.format((AAtInv * AAt).round(4))
# TEST Matrix math (2c)
Test.assertTrue(np.all(AAt == np.matrix([[30, 70], [70, 174]])), 'incorrect value for AAt')
Test.assertTrue(np.allclose(AAtInv, np.matrix([[0.54375, -0.21875], [-0.21875, 0.09375]])),
'incorrect value for AAtInv')
# TODO: Replace <FILL IN> with appropriate code
features = np.array([1, 2, 3, 4])
print 'features:\n{0}'.format(features)
# The last three elements of features
lastThree = features[-3:]
print '\nlastThree:\n{0}'.format(lastThree)
# TEST Slices (3a)
Test.assertTrue(np.all(lastThree == [2, 3, 4]), 'incorrect value for lastThree')
# TODO: Replace <FILL IN> with appropriate code
zeros = np.zeros(8)
ones = np.ones(8)
print 'zeros:\n{0}'.format(zeros)
print '\nones:\n{0}'.format(ones)
zerosThenOnes = np.hstack((zeros, ones)) # A 1 by 16 array
zerosAboveOnes = np.vstack((zeros, ones)) # A 2 by 8 array
print '\nzerosThenOnes:\n{0}'.format(zerosThenOnes)
print '\nzerosAboveOnes:\n{0}'.format(zerosAboveOnes)
# TEST Combining ndarray objects (3b)
Test.assertTrue(np.all(zerosThenOnes == [0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]),
'incorrect value for zerosThenOnes')
Test.assertTrue(np.all(zerosAboveOnes == [[0,0,0,0,0,0,0,0],[1,1,1,1,1,1,1,1]]),
'incorrect value for zerosAboveOnes')
from pyspark.mllib.linalg import DenseVector
# TODO: Replace <FILL IN> with appropriate code
numpyVector = np.array([-3, -4, 5])
print '\nnumpyVector:\n{0}'.format(numpyVector)
# Create a DenseVector consisting of the values [3.0, 4.0, 5.0]
myDenseVector = DenseVector([3.0, 4.0, 5.0])
# Calculate the dot product between the two vectors.
denseDotProduct = myDenseVector.dot(numpyVector)
print 'myDenseVector:\n{0}'.format(myDenseVector)
print '\ndenseDotProduct:\n{0}'.format(denseDotProduct)
# TEST PySpark's DenseVector (3c)
Test.assertTrue(isinstance(myDenseVector, DenseVector), 'myDenseVector is not a DenseVector')
Test.assertTrue(np.allclose(myDenseVector, np.array([3., 4., 5.])),
'incorrect value for myDenseVector')
Test.assertTrue(np.allclose(denseDotProduct, 0.0), 'incorrect value for denseDotProduct')
# Example function
def addS(x):
return x + 's'
print type(addS)
print addS
print addS('cat')
# As a lambda
addSLambda = lambda x: x + 's'
print type(addSLambda)
print addSLambda
print addSLambda('cat')
# TODO: Replace <FILL IN> with appropriate code
# Recall that: "lambda x, y: x + y" creates a function that adds together two numbers
multiplyByTen = lambda x: x * 10
print multiplyByTen(5)
# Note that the function still shows its name as <lambda>
print '\n', multiplyByTen
# TEST Python lambda expressions (4a)
Test.assertEquals(multiplyByTen(10), 100, 'incorrect definition for multiplyByTen')
# Code using def that we will recreate with lambdas
def plus(x, y):
return x + y
def minus(x, y):
return x - y
functions = [plus, minus]
print functions[0](4, 5)
print functions[1](4, 5)
# TODO: Replace <FILL IN> with appropriate code
# The first function should add two values, while the second function should subtract the second
# value from the first value.
lambdaFunctions = [lambda x, y: x + y , lambda x, y: x - y]
print lambdaFunctions[0](4, 5)
print lambdaFunctions[1](4, 5)
# TEST lambda fewer steps than def (4b)
Test.assertEquals(lambdaFunctions[0](10, 10), 20, 'incorrect first lambdaFunction')
Test.assertEquals(lambdaFunctions[1](10, 10), 0, 'incorrect second lambdaFunction')
# Examples. Note that the spacing has been modified to distinguish parameters from tuples.
# One-parameter function
a1 = lambda x: x[0] + x[1]
a2 = lambda (x0, x1): x0 + x1
print 'a1( (3,4) ) = {0}'.format( a1( (3,4) ) )
print 'a2( (3,4) ) = {0}'.format( a2( (3,4) ) )
# Two-parameter function
b1 = lambda x, y: (x[0] + y[0], x[1] + y[1])
b2 = lambda (x0, x1), (y0, y1): (x0 + y0, x1 + y1)
print '\nb1( (1,2), (3,4) ) = {0}'.format( b1( (1,2), (3,4) ) )
print 'b2( (1,2), (3,4) ) = {0}'.format( b2( (1,2), (3,4) ) )
# TODO: Replace <FILL IN> with appropriate code
# Use both syntaxes to create a function that takes in a tuple of two values and swaps their order
# E.g. (1, 2) => (2, 1)
swap1 = lambda x: (x[1], x[0])
swap2 = lambda (x0, x1): (x1, x0)
print 'swap1((1, 2)) = {0}'.format(swap1((1, 2)))
print 'swap2((1, 2)) = {0}'.format(swap2((1, 2)))
# Using either syntax, create a function that takes in a tuple with three values and returns a tuple
# of (2nd value, 3rd value, 1st value). E.g. (1, 2, 3) => (2, 3, 1)
swapOrder = lambda (x0, x1, x2) : (x1, x2, x0)
print 'swapOrder((1, 2, 3)) = {0}'.format(swapOrder((1, 2, 3)))
# Using either syntax, create a function that takes in three tuples each with two values. The
# function should return a tuple with the values in the first position summed and the values in the
# second position summed. E.g. (1, 2), (3, 4), (5, 6) => (1 + 3 + 5, 2 + 4 + 6) => (9, 12)
sumThree = lambda x0, x1, x2: (x0[0] + x1[0] + x2[0], x0[1] + x1[1] + x2[1])
print 'sumThree((1, 2), (3, 4), (5, 6)) = {0}'.format(sumThree((1, 2), (3, 4), (5, 6)))
# TEST Lambda expression arguments (4c)
Test.assertEquals(swap1((1, 2)), (2, 1), 'incorrect definition for swap1')
Test.assertEquals(swap2((1, 2)), (2, 1), 'incorrect definition for swap2')
Test.assertEquals(swapOrder((1, 2, 3)), (2, 3, 1), 'incorrect definition fo swapOrder')
Test.assertEquals(sumThree((1, 2), (3, 4), (5, 6)), (9, 12), 'incorrect definition for sumThree')
# Just run this code
# This code will fail with a syntax error, as we can't use print in a lambda expression
import traceback
try:
exec "lambda x: print x"
except:
traceback.print_exc()
# Create a class to give our examples the same syntax as PySpark
class FunctionalWrapper(object):
def __init__(self, data):
self.data = data
def map(self, function):
        """Call `map` on the items in `data` using the provided `function`"""
return FunctionalWrapper(map(function, self.data))
def reduce(self, function):
        """Call `reduce` on the items in `data` using the provided `function`"""
return reduce(function, self.data)
def filter(self, function):
        """Call `filter` on the items in `data` using the provided `function`"""
return FunctionalWrapper(filter(function, self.data))
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.__dict__ == other.__dict__)
def __getattr__(self, name): return getattr(self.data, name)
def __getitem__(self, k): return self.data.__getitem__(k)
def __repr__(self): return 'FunctionalWrapper({0})'.format(repr(self.data))
def __str__(self): return 'FunctionalWrapper({0})'.format(str(self.data))
# Map example
# Create some data
mapData = FunctionalWrapper(range(5))
# Define a function to be applied to each element
f = lambda x: x + 3
# Imperative programming: loop through and create a new object by applying f
mapResult = FunctionalWrapper([]) # Initialize the result
for element in mapData:
mapResult.append(f(element)) # Apply f and save the new value
print 'Result from for loop: {0}'.format(mapResult)
# Functional programming: use map rather than a for loop
print 'Result from map call: {0}'.format(mapData.map(f))
# Note that the results are the same but that the map function abstracts away the implementation
# and requires less code
# TODO: Replace <FILL IN> with appropriate code
dataset = FunctionalWrapper(range(10))
# Multiply each element by 5
mapResult = dataset.map(lambda x : x * 5)
# Keep the even elements
# Note that "x % 2" evaluates to the remainder of x divided by 2
filterResult = dataset.filter(lambda x : not(x % 2))
# Sum the elements
reduceResult = dataset.reduce(lambda x, y : x + y)
print 'mapResult: {0}'.format(mapResult)
print '\nfilterResult: {0}'.format(filterResult)
print '\nreduceResult: {0}'.format(reduceResult)
# TEST Functional programming (4e)
Test.assertEquals(mapResult, FunctionalWrapper([0, 5, 10, 15, 20, 25, 30, 35, 40, 45]),
'incorrect value for mapResult')
Test.assertEquals(filterResult, FunctionalWrapper([0, 2, 4, 6, 8]),
'incorrect value for filterResult')
Test.assertEquals(reduceResult, 45, 'incorrect value for reduceResult')
# Example of a mult-line expression statement
# Note that placing parentheses around the expression allow it to exist on multiple lines without
# causing a syntax error.
(dataset
.map(lambda x: x + 2)
.reduce(lambda x, y: x * y))
# TODO: Replace <FILL IN> with appropriate code
# Multiply the elements in dataset by five, keep just the even values, and sum those values
finalSum = dataset.map(lambda x : x * 5).filter(lambda x : not(x%2)).reduce(lambda x, y : x + y)
print finalSum
# TEST Composability (4f)
Test.assertEquals(finalSum, 100, 'incorrect value for finalSum')
# Run this code to view Criteo's agreement
# Note that some ad blocker software will prevent this IFrame from loading.
# If this happens, open the webpage in a separate tab and follow the instructions from above.
from IPython.lib.display import IFrame
IFrame("http://labs.criteo.com/downloads/2014-kaggle-display-advertising-challenge-dataset/",
600, 350)
# TODO: Replace <FILL IN> with appropriate code
# Just replace <FILL IN> with the url for dac_sample.tar.gz
import glob
import os.path
import tarfile
import urllib
import urlparse
# Paste url, url should end with: dac_sample.tar.gz
url = 'http://labs.criteo.com/wp-content/uploads/2015/04/dac_sample.tar.gz'
url = url.strip()
baseDir = os.path.join('data')
inputPath = os.path.join('cs190', 'dac_sample.txt')
fileName = os.path.join(baseDir, inputPath)
inputDir = os.path.split(fileName)[0]
def extractTar(check = False):
# Find the zipped archive and extract the dataset
tars = glob.glob('dac_sample*.tar.gz*')
if check and len(tars) == 0:
return False
if len(tars) > 0:
try:
tarFile = tarfile.open(tars[0])
except tarfile.ReadError:
if not check:
print 'Unable to open tar.gz file. Check your URL.'
return False
tarFile.extract('dac_sample.txt', path=inputDir)
print 'Successfully extracted: dac_sample.txt'
return True
else:
print 'You need to retry the download with the correct url.'
print ('Alternatively, you can upload the dac_sample.tar.gz file to your Jupyter root ' +
'directory')
return False
if os.path.isfile(fileName):
print 'File is already available. Nothing to do.'
elif extractTar(check = True):
print 'tar.gz file was already available.'
elif not url.endswith('dac_sample.tar.gz'):
print 'Check your download url. Are you downloading the Sample dataset?'
else:
# Download the file and store it in the same directory as this notebook
try:
urllib.urlretrieve(url, os.path.basename(urlparse.urlsplit(url).path))
except IOError:
print 'Unable to download and store: {0}'.format(url)
extractTar()
import os.path
baseDir = os.path.join('data')
inputPath = os.path.join('cs190', 'dac_sample.txt')
fileName = os.path.join(baseDir, inputPath)
if os.path.isfile(fileName):
rawData = (sc
.textFile(fileName, 2)
.map(lambda x: x.replace('\t', ','))) # work with either ',' or '\t' separated data
print rawData.take(1)
rawDataCount = rawData.count()
print rawDataCount
# This line tests that the correct number of observations have been loaded
assert rawDataCount == 100000, 'incorrect count for rawData'
if rawDataCount == 100000:
print 'Criteo data loaded successfully!'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: (1+1)-$\sigma$-Self-Adaptation-ES
Step3: Some explanations about $\sigma$ and $\tau$
Step4: Other inplementations
Step5: Define the objective function
|
<ASSISTANT_TASK:>
Python Code:
# Init matplotlib
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['figure.figsize'] = (8, 8)
# Setup PyAI
import sys
sys.path.insert(0, '/Users/jdecock/git/pub/jdhp/pyai')
# Set the objective function
#from pyai.optimize.functions import sphere as func
from pyai.optimize.functions import sphere2d as func
#from pyai.optimize.functions import additive_gaussian_noise as noise
from pyai.optimize.functions import multiplicative_gaussian_noise as noise
#from pyai.optimize.functions import additive_poisson_noise as noise
#func.noise = noise # Comment this line to use a deterministic objective function
xmin = func.bounds[0] # TODO
xmax = func.bounds[1] # TODO
import numpy as np
import math
"""
This is a simple Python implementation of the (mu/1, lambda)-sigmaSA-ES
as discussed in
http://www.scholarpedia.org/article/Evolution_Strategies
"""
mu = 3 # mu: the number of parents
lmb = 12 # lambda: the number of children
rho = 1 # rho: number of parents per child
selection_operator = '+'
d = 2 # number of dimension of the solution space
num_gen = 10
tau = 1./math.sqrt(2.*d) # self-adaptation learning rate
# Init the population ##########################
# "pop" array layout:
# - the first mu lines contain parents
# - the next lambda lines contain children
# - the first column contains the individual's strategy (sigma)
# - the last column contains the individual's assess (f(x))
# - the other columns contain the individual value (x)
pop = np.full([mu+lmb, d+2], np.nan)
pop[:mu, 0] = 1. # init the parents strategy to 1.0
pop[:mu, 1:-1] = np.random.normal(0., 1., size=[mu,d]) # init the parents value
pop[:mu, -1] = func(pop[:mu, 1:-1].T) # evaluate parents
print("Initial population:\n", pop)
## Sort parents
#pop = pop[pop[:,-1].argsort()]
#print(pop)
for gen in range(num_gen):
# Make children ################################
if rho == 1:
# Each child is made from one randomly selected parent
pop[mu:,:] = pop[np.random.randint(mu, size=lmb)]
elif rho == mu:
# Recombine all parents for each child
        raise NotImplementedError()  # TODO
elif 1 < rho < mu:
# Recombine rho randomly selected parents for each child
        raise NotImplementedError()  # TODO
else:
raise ValueError()
pop[mu:,-1] = np.nan
#print("Children:\n", pop)
# Mutate children's sigma ######################
pop[mu:,0] = pop[mu:,0] * np.exp(tau * np.random.normal(size=lmb))
#print("Mutated children (sigma):\n", pop)
# Mutate children's value ######################
    pop[mu:,1:-1] = pop[mu:,1:-1] + pop[mu:,0:1] * np.random.normal(size=[lmb,d])  # x' = x + sigma' * N(0,1)
#print("Mutated children (value):\n", pop)
# Evaluate children ############################
pop[mu:, -1] = func(pop[mu:, 1:-1].T)
#print("Evaluated children:\n", pop)
# Select the best individuals ##################
if selection_operator == '+':
# *plus-selection* operator
pop = pop[pop[:,-1].argsort()]
elif selection_operator == ',':
# *comma-selection* operator
        children = pop[mu:,:]
        pop[:mu,:] = children[children[:,-1].argsort()[:mu]]  # keep the best mu children
else:
raise ValueError()
pop[mu:, :] = np.nan
#print("Selected individuals for the next generation:\n", pop)
print("Result:\n", pop[:mu, :])
tau
import random
sigma_list = [1.]
for i in range(1000):
sigma_list.append(sigma_list[-1] * math.exp(tau * random.normalvariate(0., 1.))) # mutate sigma
#sigma = sigma * exp(tau*randn) # mutate sigma
plt.loglog(sigma_list);
x = np.linspace(-4, 4, 100)
y1 = np.exp(1./math.sqrt(1.*d) * x)
y2 = np.exp(1./math.sqrt(2.*d) * x)
y3 = np.exp(1./math.sqrt(3.*d) * x)
y4 = np.exp(1./(2.*d) * x)
plt.plot(x, y1, label="tau1")
plt.plot(x, y2, label="tau2")
plt.plot(x, y3, label="tau3")
plt.plot(x, y4, label="tau4")
plt.legend();
tau1 = 1./math.sqrt(1.*d)
tau2 = 1./math.sqrt(2.*d)
tau3 = 1./math.sqrt(3.*d)
tau4 = 1./(2.*d)
x1 = np.exp(tau1 * np.random.normal(size=[100000]))
x2 = np.exp(tau2 * np.random.normal(size=[100000]))
x3 = np.exp(tau3 * np.random.normal(size=[100000]))
x4 = np.exp(tau4 * np.random.normal(size=[100000]))
bins = np.linspace(0, 10, 100)
plt.hist(x1, bins=bins, alpha=0.5, label=r"$\exp\left(\frac{1}{\sqrt{d}} \mathcal{N}(0,1)\right)$", lw=2, histtype='step')
plt.hist(x2, bins=bins, alpha=0.5, label=r"$\exp\left(\frac{1}{\sqrt{2d}} \mathcal{N}(0,1)\right)$", lw=2, histtype='step')
plt.hist(x3, bins=bins, alpha=0.5, label=r"$\exp\left(\frac{1}{\sqrt{3d}} \mathcal{N}(0,1)\right)$", lw=2, histtype='step')
plt.hist(x4, bins=bins, alpha=0.5, label=r"$\exp\left(\frac{1}{2d} \mathcal{N}(0,1)\right)$", lw=2, histtype='step')
plt.xlim(-0.25, 7)
plt.axvline(1, color='k', linestyle='dotted')
plt.legend(fontsize='x-large');
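# One way to read these histograms: the multiplicative factor exp(tau * N(0,1))
# is log-normally distributed, so its median sits exactly at the dotted line
# (median = exp(0) = 1) while its mean lies slightly above it (mean = exp(tau**2 / 2) > 1).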
# Init matplotlib
%matplotlib inline
import matplotlib
matplotlib.rcParams['figure.figsize'] = (8, 8)
# Setup PyAI
import sys
sys.path.insert(0, '/Users/jdecock/git/pub/jdhp/pyai')
import numpy as np
import time
from pyai.optimize import SAES
# Plot functions
from pyai.optimize.utils import plot_contour_2d_solution_space
from pyai.optimize.utils import plot_2d_solution_space
from pyai.optimize.utils import array_list_to_array
from pyai.optimize.utils import plot_fx_wt_iteration_number
from pyai.optimize.utils import plot_err_wt_iteration_number
from pyai.optimize.utils import plot_err_wt_execution_time
from pyai.optimize.utils import plot_err_wt_num_feval
## Objective function: Rosenbrock function (Scipy's implementation)
#func = scipy.optimize.rosen
# Set the objective function
#from pyai.optimize.functions import sphere as func
from pyai.optimize.functions import sphere2d as func
#from pyai.optimize.functions import additive_gaussian_noise as noise
from pyai.optimize.functions import multiplicative_gaussian_noise as noise
#from pyai.optimize.functions import additive_poisson_noise as noise
func.noise = noise # Comment this line to use a deterministic objective function
xmin = func.bounds[0] # TODO
xmax = func.bounds[1] # TODO
%%time
saes = SAES()
func.do_eval_logs = True
func.reset_eval_counters()
func.reset_eval_logs()
res = saes.minimize(func, init_pop_mu=0., init_pop_sigma=1.)
func.do_eval_logs = False
eval_x_array = np.array(func.eval_logs_dict['x']).T
eval_error_array = np.array(func.eval_logs_dict['fx']) - func(func.arg_min)
res
plot_contour_2d_solution_space(func,
xmin=xmin,
xmax=xmax,
xstar=res,
xvisited=eval_x_array,
title="SAES");
plot_err_wt_num_feval(eval_error_array, x_log=True, y_log=True)
%%time
eval_error_array_list = []
NUM_RUNS = 100
for run_index in range(NUM_RUNS):
saes = SAES()
func.do_eval_logs = True
func.reset_eval_counters()
func.reset_eval_logs()
res = saes.minimize(func, init_pop_mu=0., init_pop_sigma=1., lmb=6)
func.do_eval_logs = False
eval_error_array = np.array(func.eval_logs_dict['fx']) - func(func.arg_min)
print("x* =", res)
eval_error_array_list.append(eval_error_array);
plot_err_wt_num_feval(array_list_to_array(eval_error_array_list), x_log=True, y_log=True, plot_option="mean")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here I'm running GSEA on the fraction upregulated signal across genes.
Step2: First I do a greedy filter based on p-values to find non-overlapping gene sets that are significantly associated with the tumor signal. For details see the filter_pathway_hits function.
Step3: The cell-cycle is a large pathway with lots of subsets in the mSigDB database. Here I'm looking for significant subsets within this pathway.
Step4: These two gene sets are completely non-overlapping subsets of the cell-cycle.
Step5: I can also change the greedy filter to look for gene-sets with large effect sizes as opposed to p-values. This is going to give us smaller, but more specific gene-sets.
Step6: Interestingly unwinding of DNA has a very large effect size but is a relatviely small gene set at only 11 genes.
|
<ASSISTANT_TASK:>
Python Code:
import NotebookImport
from DX_screen import *
gs2 = gene_sets.ix[dx_rna.index].fillna(0)
rr = screen_feature(dx_rna.frac, rev_kruskal, gs2.T,
align=False)
fp = (1.*gene_sets.T * dx_rna.frac).T.dropna().replace(0, np.nan).mean().order()
fp.name = 'mean frac'
ff_u = filter_pathway_hits(rr.ix[ti(fp>.5)].p.order(), gs2)
ff_p = filter_pathway_hits(rr.ix[ti(fp<.5)].p.order(), gs2)
ff = ff_u.append(ff_p)
selected = rr.ix[ff[ff < .00001].index].join(fp)
selected.sort('p')
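# filter_pathway_hits comes from the imported DX_screen module; for readers
# without it, a minimal sketch of what a greedy non-overlapping filter of this
# kind might look like (hypothetical helper; assumes a p-value Series indexed by
# gene-set name and a genes-by-sets membership matrix like gs2):
def greedy_filter_sketch(pvals, memberships):
    kept, covered = [], set()
    # walk gene sets from most to least significant, keeping one only if none
    # of its genes are already covered by a previously kept set
    for gs in pvals.order().index:
        genes = set(memberships.index[memberships[gs] > 0])
        if not (genes & covered):
            kept.append(gs)
            covered |= genes
    return pvals[kept]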
d = pd.DataFrame({g: gs2['REACTOME_CELL_CYCLE'] for g in gs2.columns})
a,b = odds_ratio_df(d.T>0, gs2.T>0)
dd = rr.ix[ti((a > 100) & (rr.q < 10e-15))].join(fp).sort(fp.name, ascending=False)
filter_pathway_hits(dd, gs2)
m_g1 = 'REACTOME_M_G1_TRANSITION'
cepna = 'REACTOME_DEPOSITION_OF_NEW_CENPA_CONTAINING_NUCLEOSOMES_AT_THE_CENTROMERE'
combine(gs2[m_g1]>0, gs2[cepna]>0).value_counts()
fig, ax = subplots()
v = pd.concat([dx_rna.frac,
dx_rna.frac.ix[ti(gs2['REACTOME_CELL_CYCLE']>0)],
dx_rna.frac.ix[ti(gs2[m_g1]>0)],
dx_rna.frac.ix[ti(gs2[cepna]>0)]
]).dropna()
v1 = pd.concat([pd.Series('All Genes', dx_rna.frac.index),
pd.Series('Cell Cycle', ti(gs2['REACTOME_CELL_CYCLE']>0)),
pd.Series('M/G1\nTransition', ti(gs2[m_g1]>0)),
pd.Series('CEPNA\nDeposition', ti(gs2[cepna]>0))
])
v1.name = ''
v.name = 'Fraction Overexpressed'
o = ['All Genes','Cell Cycle','CEPNA\nDeposition',
'M/G1\nTransition']
violin_plot_pandas(v1, v, order=o, ann=None, ax=ax)
prettify_ax(ax)
ax.spines['bottom'].set_visible(False)
ax.axhline(.5, color='grey', lw=2, ls='--');
f2 = fp.ix[ti(rr.q < .00001)]
ff_u = filter_pathway_hits(fp.ix[ti(f2>.5)].order()[::-1], gs2)
ff_p = filter_pathway_hits(fp.ix[ti(f2<.5)].order(), gs2)
ff = ff_u.append(ff_p)
selected = rr.ix[ff.index].join(f2)
selected.ix[(f2 - .5).abs().order().index[::-1]].dropna()
unwind = 'REACTOME_UNWINDING_OF_DNA'
telo = 'REACTOME_EXTENSION_OF_TELOMERES'
fig, ax = subplots()
v = pd.concat([dx_rna.frac,
dx_rna.frac.ix[ti(gs2[cepna]>0)],
dx_rna.frac.ix[ti(gs2[unwind]>0)],
dx_rna.frac.ix[ti(gs2[telo]>0)]
]).dropna()
v1 = pd.concat([pd.Series('All Genes', dx_rna.frac.index),
pd.Series('CEPNA\nDeposition', ti(gs2[cepna]>0)),
pd.Series('Unwinding\nof DNA', ti(gs2[unwind]>0)),
pd.Series('Extension\nof Telomeres', ti(gs2[telo]>0))
])
v1.name = ''
v.name = 'Fraction Overexpressed'
o = ['All Genes', 'CEPNA\nDeposition',
'Extension\nof Telomeres', 'Unwinding\nof DNA', ]
violin_plot_pandas(v1, v, order=o, ann=None, ax=ax)
prettify_ax(ax)
ax.spines['bottom'].set_visible(False)
ax.axhline(.5, color='grey', lw=2, ls='--');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: plot mean power
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_band_induced_power
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
tmin, tmax, event_id = -0.2, 0.5, 1
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
# Load condition 1
event_id = 1
events = events[:10] # take 10 events to keep the computation time low
# Use linear detrend to reduce any edge artifacts
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
preload=True, detrend=1)
# Compute a source estimate per frequency band
bands = dict(alpha=[9, 11], beta=[18, 22])
stcs = source_band_induced_power(epochs, inverse_operator, bands, n_cycles=2,
use_fft=False, n_jobs=1)
for b, stc in stcs.items():  # works under both Python 2 and 3
stc.save('induced_power_%s' % b)
plt.plot(stcs['alpha'].times, stcs['alpha'].data.mean(axis=0), label='Alpha')
plt.plot(stcs['beta'].times, stcs['beta'].data.mean(axis=0), label='Beta')
plt.xlabel('Time (ms)')
plt.ylabel('Power')
plt.legend()
plt.title('Mean source induced power')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Representation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mpi-m', 'mpi-esm-1-2-hr', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The performance here is very poor: we train on only 1,024 samples for 3 epochs. We really need to train with more samples and for more epochs.
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
%matplotlib inline
plt.gray()
from keras.datasets import mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
fig, axes = plt.subplots(3,5, figsize=(12,8))
for i, ax in enumerate(axes.flatten()):
ax.imshow(X_train[i], interpolation='nearest')
plt.show()
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
batch_size = 512
nb_classes = 10
nb_epoch = 3
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
# CAUTION: Without utilizing a GPU even this very short example is incredibly slow to run.
model = Sequential()
#model.add(Convolution2D(8, 1, 3, 3, input_shape=(1,28,28), activation='relu'))
model.add(Convolution2D(4, 3, 3, input_shape=(1,28,28), activation='relu'))
#model.add(Convolution2D(4, 3, 3, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
# input dims are inferred from the previous layer; after the 3x3 conv (26x26)
# and 2x2 pooling (13x13), Flatten yields 4*13*13 = 676 features
model.add(Dense(4, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
model.fit(X_train[:1024], Y_train[:1024], batch_size=batch_size, nb_epoch=nb_epoch, verbose=1,
validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss and accuracy:', score)
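# Hedged scale-up sketch (my addition, not in the original notebook): train on
# the full 60k-sample training set for more epochs, as suggested above.
# Left commented out because it is very slow without a GPU:
#
# model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=12, verbose=1,
#           validation_data=(X_test, Y_test))
# print('Full-training loss and accuracy:', model.evaluate(X_test, Y_test, verbose=0))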
predictions = model.predict_classes(X_test)
fig, axes = plt.subplots(3,5, figsize=(12,8))
for i, ax in enumerate(axes.flatten()):
ax.imshow(X_test[predictions == 7][i].reshape((28,28)), interpolation='nearest')
plt.show()
from sklearn.metrics import confusion_matrix
import numpy as np  # needed for fill_diagonal below
cm = confusion_matrix(y_test, predictions)
np.fill_diagonal(cm, 0)  # zero the diagonal so only misclassifications remain visible
plt.bone()
plt.matshow(cm)
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up our source space.
Step2: Export source positions to a NIfTI file
|
<ASSISTANT_TASK:>
Python Code:
# Author: Annalisa Pascarella <a.pascarella@iac.cnr.it>
#
# License: BSD (3-clause)
import os.path as op
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne import setup_volume_source_space
from mne import make_forward_solution
from mne.minimum_norm import make_inverse_operator, apply_inverse
from nilearn import plotting
# Set dir
data_path = sample.data_path()
subject = 'sample'
data_dir = op.join(data_path, 'MEG', subject)
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
# Set file names
fname_mixed_src = op.join(bem_dir, '%s-oct-6-mixed-src.fif' % subject)
fname_aseg = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
fname_model = op.join(bem_dir, '%s-5120-bem.fif' % subject)
fname_bem = op.join(bem_dir, '%s-5120-bem-sol.fif' % subject)
fname_evoked = data_dir + '/sample_audvis-ave.fif'
fname_trans = data_dir + '/sample_audvis_raw-trans.fif'
fname_fwd = data_dir + '/sample_audvis-meg-oct-6-mixed-fwd.fif'
fname_cov = data_dir + '/sample_audvis-shrunk-cov.fif'
# List substructures we are interested in. We select only the
# substructures we want to include in the source space
labels_vol = ['Left-Amygdala',
'Left-Thalamus-Proper',
'Left-Cerebellum-Cortex',
'Brain-Stem',
'Right-Amygdala',
'Right-Thalamus-Proper',
'Right-Cerebellum-Cortex']
# Get a surface-based source space. We could set one up like this::
#
# >>> src = setup_source_space(subject, fname=None, spacing='oct6',
# add_dist=False, subjects_dir=subjects_dir)
#
# But we already have one saved:
src = mne.read_source_spaces(op.join(bem_dir, 'sample-oct-6-src.fif'))
# Now we create a mixed src space by adding the volume regions specified in the
# list labels_vol. First, read the aseg file and the source space bounds
# using the inner skull surface (here using 10mm spacing to save time):
vol_src = setup_volume_source_space(
subject, mri=fname_aseg, pos=7.0, bem=fname_model,
volume_label=labels_vol, subjects_dir=subjects_dir, verbose=True)
# Generate the mixed source space
src += vol_src
# Visualize the source space.
src.plot(subjects_dir=subjects_dir)
n = sum(src[i]['nuse'] for i in range(len(src)))
print('the src space contains %d spaces and %d points' % (len(src), n))
# We could write the mixed source space with::
#
# >>> write_source_spaces(fname_mixed_src, src, overwrite=True)
#
nii_fname = op.join(bem_dir, '%s-mixed-src.nii' % subject)
src.export_volume(nii_fname, mri_resolution=True)
plotting.plot_img(nii_fname, cmap=plt.cm.nipy_spectral)  # 'spectral' was removed in newer matplotlib
plt.show()
# Compute the fwd matrix
fwd = make_forward_solution(fname_evoked, fname_trans, src, fname_bem,
mindist=5.0, # ignore sources<=5mm from innerskull
meg=True, eeg=False, n_jobs=1)
leadfield = fwd['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
src_fwd = fwd['src']
n = sum(src_fwd[i]['nuse'] for i in range(len(src_fwd)))
print('the fwd src space contains %d spaces and %d points' % (len(src_fwd), n))
# Load data
condition = 'Left Auditory'
evoked = mne.read_evokeds(fname_evoked, condition=condition,
baseline=(None, 0))
noise_cov = mne.read_cov(fname_cov)
# Compute inverse solution and for each epoch
snr = 3.0 # use smaller SNR for raw data
inv_method = 'MNE' # sLORETA, MNE, dSPM
parc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s'
lambda2 = 1.0 / snr ** 2
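# lambda2 = 1 / SNR^2 is the standard regularization parameter for MNE-type inverses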
# Compute inverse operator
inverse_operator = make_inverse_operator(evoked.info, fwd, noise_cov,
depth=None, fixed=False)
stcs = apply_inverse(evoked, inverse_operator, lambda2, inv_method,
pick_ori=None)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels_parc = mne.read_labels_from_annot(subject, parc=parc,
subjects_dir=subjects_dir)
# Average the source estimates within each label of the cortical parcellation
# and each sub structure contained in the src space
# If mode = 'mean_flip' this option is used only for the surface cortical label
src = inverse_operator['src']
label_ts = mne.extract_label_time_course([stcs], labels_parc, src,
mode='mean',
allow_empty=True,
return_generator=False)
# plot the times series of 2 labels
fig, axes = plt.subplots(1)
axes.plot(1e3 * stcs.times, label_ts[0][0, :], 'k', label='bankssts-lh')
axes.plot(1e3 * stcs.times, label_ts[0][71, :].T, 'r',
label='Brain-stem')
axes.set(xlabel='Time (ms)', ylabel='MNE current (nAm)')
axes.legend()
mne.viz.tight_layout()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get a List of Orders for an Account
Step2: List all Pending Orders in an Account
Step3: Get Details for a Single Order in an Account
Step4: Replace an Order in an Account by simultaneously cancelling it and creating a replacement Order.
Step5: Cancel a pending Order in an Account.
Step6: MKT Order
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import oandapyV20
import oandapyV20.endpoints.orders as orders
import configparser
config = configparser.ConfigParser()
config.read('../config/config_v20.ini')
accountID = config['oanda']['account_id']
access_token = config['oanda']['api_key']
client = oandapyV20.API(access_token=access_token)
data = {
"order": {
"price": "1.2",
"stopLossOnFill": {
"timeInForce": "GTC",
"price": "1.22"
},
"timeInForce": "GTC",
"instrument": "EUR_USD",
"units": "-100",
"type": "LIMIT",
"positionFill": "DEFAULT"
}
}
r = orders.OrderCreate(accountID, data=data)
client.request(r)
print(r.response)
pd.Series(r.response['orderCreateTransaction'])
r = orders.OrderList(accountID)
client.request(r)
print(r.response)
pd.Series(r.response['orders'][0])
r = orders.OrdersPending(accountID)
client.request(r)
print(r.response)
res = r.response['orders']
print(res)
last_order_id = res[0]['id']
pd.Series(r.response['orders'][0])
r = orders.OrderDetails(accountID=accountID, orderID=last_order_id)
client.request(r)
data = {
"order": {
"units": "-500000",
"instrument": "EUR_USD",
"price": "1.25000",
"type": "LIMIT"
}
}
r = orders.OrderReplace(accountID=accountID, orderID=last_order_id, data=data)
client.request(r)
print(r.response)
req_id = r.response['lastTransactionID']
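# note (added): in the v20 API an order's ID is the ID of the transaction that
# created it, so the replacement order's ID equals the last transaction ID above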
r = orders.OrderCancel(accountID=accountID, orderID=req_id)
client.request(r)
print(r.response)
last_order_id
data = {"order":
{"units": "100",
"instrument": "GBP_USD",
"timeInForce": "FOK",
"type": "MARKET",
"positionFill": "DEFAULT"
},
}
r = orders.OrderCreate(accountID, data=data)
client.request(r)
print(r.response)
pd.Series(r.response['orderCreateTransaction'])
pd.Series(r.response['orderFillTransaction'])
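# A hedged sketch (my addition): market orders can also carry dependent orders
# created on fill; the v20 API accepts e.g. "takeProfitOnFill" alongside the
# "stopLossOnFill" used earlier. The price below is illustrative only, and the
# request is left commented out to avoid placing a live trade:
#
# data = {"order":
#         {"units": "100",
#          "instrument": "GBP_USD",
#          "timeInForce": "FOK",
#          "type": "MARKET",
#          "positionFill": "DEFAULT",
#          "takeProfitOnFill": {"price": "1.4000"}
#         },
#        }
# r = orders.OrderCreate(accountID, data=data)
# client.request(r)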
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
Step2: Preprocessing
Step3: And here I'm creating dictionaries to convert words to integers and back, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.
Step4: Subsampling
Step5: Making batches
Step6: Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function, by the way, which helps save memory.
Step7: Building the graph
Step8: Embedding
Step9: Negative sampling
Step10: Validation
Step11: Training
Step12: Restore the trained network if you need to
Step13: Visualizing the word vectors
|
<ASSISTANT_TASK:>
Python Code:
import time
import numpy as np
import tensorflow as tf
import utils
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(dataset_filename):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
urlretrieve(
'http://mattmahoney.net/dc/text8.zip',
dataset_filename,
pbar.hook)
if not isdir(dataset_folder_path):
with zipfile.ZipFile(dataset_filename) as zip_ref:
zip_ref.extractall(dataset_folder_path)
with open('data/text8') as f:
text = f.read()
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
## Your code here
import random
threshold = 1e-5
wordsInt = sorted(int_words)
print(wordsInt[:30])
bins = np.bincount(wordsInt)
print(bins[:30])
# size these per vocabulary entry (len(bins)), not per token; this also avoids
# divide-by-zero on the unused tail of a len(words)-sized array
frequencies = np.zeros(len(bins), dtype=float)
for index, singlebin in enumerate(bins):
    frequencies[index] = singlebin / len(int_words)
print(frequencies[:30])
probs = np.zeros(len(bins), dtype=float)
for index, singlefrequency in enumerate(frequencies):
    probs[index] = 1 - np.sqrt(threshold/singlefrequency)
print(probs[:30])
# Discard some word considering single word discarding probability
train_words = []
for int_word in int_words:
discardRandom = random.random()
if probs[int_word] > discardRandom:
        print("Skip one occurrence of " + int_to_vocab[int_word])
else:
train_words.append(int_word)
print(train_words[:30])
print(len(train_words))
#Solution (faster and cleaner)
from collections import Counter
import random
threshold_2 = 1e-5
word_counts = Counter(int_words)
total_count = len(int_words)
freqs = {word: count/total_count for word, count in word_counts.items()}
p_drop = {word: 1 - np.sqrt(threshold_2/freqs[word]) for word in word_counts}
train_words_2 = [word for word in int_words if p_drop[word] < random.random()]
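# Sanity check (added): subsampling should shrink the token stream noticeably
print(len(int_words), len(train_words_2))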
def get_target(words, idx, window_size=5):
''' Get a list of words in a window around an index. '''
# My wrong implementation
#C = random.uniform(1,window_size,1)
#return words[idx-C:idx-1] + words[idx+1:idx+C]
#Solution
R = np.random.randint(1, window_size+1)
start = idx - R if (idx - R) > 0 else 0
stop = idx + R
target_words = set(words[start:idx] + words[idx+1:stop+1])
return list(target_words)
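# Quick usage example (added): targets within a random-width window around idx=4
print(get_target(list(range(10)), idx=4, window_size=2))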
def get_batches(words, batch_size, window_size=5):
''' Create a generator of word batches as a tuple (inputs, targets) '''
n_batches = len(words)//batch_size
# only full batches
words = words[:n_batches*batch_size]
for idx in range(0, len(words), batch_size):
x, y = [], []
batch = words[idx:idx+batch_size]
for ii in range(len(batch)):
batch_x = batch[ii]
batch_y = get_target(batch, ii, window_size)
y.extend(batch_y)
x.extend([batch_x]*len(batch_y))
yield x, y
train_graph = tf.Graph()
with train_graph.as_default():
inputs = tf.placeholder(tf.int32, shape=(None), name="inputs")
labels = tf.placeholder(tf.int32, shape=(None, None), name="labels")
n_vocab = len(int_to_vocab)
n_embedding = 200
with train_graph.as_default():
embedding = tf.Variable(tf.random_uniform(shape=(n_vocab, n_embedding), minval=-1.0, maxval=1.0))
embed = tf.nn.embedding_lookup(embedding, inputs)
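    # embedding_lookup simply selects rows of `embedding`, so `embed` has shape
    # (batch_size, n_embedding): the cheap equivalent of a one-hot matmul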
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
softmax_w = tf.Variable(tf.truncated_normal(shape=(n_embedding, n_vocab), mean=0.0, stddev=0.01))
softmax_b = tf.Variable(tf.zeros(n_vocab))
# Calculate the loss using negative sampling
    # sampled_softmax_loss expects weights of shape (num_classes, dim),
    # hence the transpose of softmax_w
    loss = tf.nn.sampled_softmax_loss(weights=tf.transpose(softmax_w), biases=softmax_b,
                                      labels=labels, inputs=embed,
                                      num_sampled=n_sampled, num_classes=n_vocab)
cost = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer().minimize(cost)
with train_graph.as_default():
## From Thushan Ganegedara's implementation
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100
# pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,
random.sample(range(1000,1000+valid_window), valid_size//2))
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist:
!mkdir checkpoints
epochs = 10
batch_size = 1000
window_size = 10
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
for e in range(1, epochs+1):
batches = get_batches(train_words, batch_size, window_size)
start = time.time()
for x, y in batches:
feed = {inputs: x,
labels: np.array(y)[:, None]}
train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
loss += train_loss
if iteration % 100 == 0:
end = time.time()
print("Epoch {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Avg. Training loss: {:.4f}".format(loss/100),
"{:.4f} sec/batch".format((end-start)/100))
loss = 0
start = time.time()
if iteration % 1000 == 0:
## From Thushan Ganegedara's implementation
# note that this is expensive (~20% slowdown if computed every 500 steps)
sim = similarity.eval()
for i in range(valid_size):
valid_word = int_to_vocab[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = int_to_vocab[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
iteration += 1
save_path = saver.save(sess, "checkpoints/text8.ckpt")
embed_mat = sess.run(normalized_embedding)
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
embed_mat = sess.run(embedding)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Represent the following table using a data structure of your choice
Step2: Calculate the mean temperature and mean humidity
Step3: Print outlook and play for those days where the temperature is greater than the average temperature
Step4: Print outlook and play for those days where the humidity is greater than the average humidity
Step5: Convert the temperature to Celsius and add a new column therefore in the table.
Step6: #1
Step7: #2
Step8: #3
Step9: #4
Step10: #5
Step11: The only inference I can make from the scatter plot above is that you always play when the humidity is between 70 and 85. Temperature seems to play no part in the decision process: in the scatter plot the play and no-play points are evenly distributed across the y axis (Temperature).
Step12: Merging
Step13: Plot CA vs AK
Step14: New England vs Southwest
Step15: Greatest Change in Population
Step16: As you can see from the table above, CA had the largest growth in terms of raw numbers for the time period. However, we can gain additional insights by looking at percentage growth.
Step17: Some states had no net growth and some had negative growth
Step18: 3
Step19: Test/Train Split
Step20: Train Model
Step21: Evaluation
Step22: As you can see from the confusion matrix, inputs of Class 1 & 2 were perfectly classified. There were only 2 mistakes on Class 3.
Step23: What are the statistical distributions of variables using no class?
Step24: How much missing data is there?
Step25: How do distributions differ by each gender?
Step26: Describe summary statistics for each attribute.
Step27: Visualize potential difference via the scatter plots.
Step28: Create a new variable for the weight in lbs
Step29: Add new variable weight + height.
Step30: BMI
Step31: Split Data By Sport
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
%pylab inline
df = pd.read_csv("weather.csv", header=0, index_col=0)
df
mean_temp = df["temperature"].mean()
mean_temp
mean_humidity = df["humidity"].mean()
mean_humidity
temp_selector = df['temperature'] > mean_temp
df[temp_selector][["outlook", "play"]]
humidity_selector = df['humidity'] > mean_humidity
df[humidity_selector][["outlook", "play"]]
df["temp_C"] = ( df["temperature"] - 32 ) * (5/9.0)
df
play_selector = df["play"]=="yes"
play_days = df[play_selector]
len(play_days)
sunny_selector = df["outlook"]=="sunny"
sunny_play_days = df[sunny_selector & play_selector]
len(sunny_play_days)
print play_days["temperature"].mean()
print play_days["temperature"].min()
print play_days["temperature"].max()
print play_days["humidity"].mean()
print play_days["humidity"].min()
print play_days["humidity"].max()
pyplot.ylabel('Temperature')
pyplot.xlabel("Humidity")
pyplot.scatter(x=play_days["humidity"], y=play_days["temperature"], c='green')
no_play_days = df[df["play"]=="no"]
pyplot.scatter(x=no_play_days["humidity"], y=no_play_days["temperature"], c='red', marker="x")
pyplot.legend(['Play', "No Play"])
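# Quick numeric check (added) of the humidity-range inference: the spread of
# humidity on play days
print play_days["humidity"].min(), play_days["humidity"].max()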
#these are in units of thousands, need to scale
df1 = pd.read_fwf("processed/st0009ts.txt", header=0, index_col=0, thousands=",").transpose()
df2 = pd.read_fwf("processed/st1019ts.txt", header=0, index_col=0, thousands=",").transpose()
df3 = pd.read_fwf("processed/st2029ts.txt", header=0, index_col=0, thousands=",").transpose()
df4 = pd.read_fwf("processed/st3039ts.txt", header=0, index_col=0, thousands=",").transpose()
df5 = pd.read_fwf("processed/st4049ts.txt", header=0, index_col=0, thousands=",").transpose()
df6 = pd.read_fwf("processed/st5060ts.txt", header=0, index_col=0, thousands=",").transpose()
df7 = pd.read_fwf("processed/st6070ts.txt", header=0, index_col=0, thousands=",").transpose()
df = pd.concat([df1, df2, df3, df4, df5, df6, df7])
#scale up to unit of 1
df = df.apply(lambda x: x*1000)
#for some reason, this dataset format uses '.'s in U.S. but doesn't for anything else. We'll normalize it here
df[["U.S."]]
df.rename(columns={'U.S.': 'US'}, inplace=True)
#the file format changes here
transform = lambda x: "19"+x[2:4]
df_9 = pd.read_fwf("processed/st7080ts.txt", header=0, index_col=0, thousands=",").transpose()
df_9.index = df_9.index.map(transform)
df_10 = pd.read_fwf("processed/st8090ts.txt", header=0, index_col=0, thousands=",").transpose()
df_10.index = df_10.index.map(transform)
df_10
df_2 = pd.concat([df_9, df_10])
# now merge the two together to get the compleete mergered df
df = pd.concat([df, df_2])
df=df.sort_index() #sort
df[["CA", "AK"]].plot()
df["New England"] = df[["CT", "ME", "MA", "NH", "RI", "VT"]].sum(axis=1)
df["Southwest"] = df[["AZ", "CA", "CO", "NV", "NM", "TX", "UT"]].sum(axis=1)
df[["New England", "Southwest"]].plot()
#remove a few composite columns:
df.drop('US', axis=1, inplace=True)
df.drop('Southwest', axis=1, inplace=True)
df.drop('New England', axis=1, inplace=True)
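# assuming the merged index starts at 1900, iloc[50] below is 1950, so the
# deltas measure growth from 1950 to the last year in the table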
delta = {}
rel_delta={}
for state in df.columns:
delta[state]=df[state].iloc[-1] - df[state].iloc[50]
rel_delta[state] = (df[state].iloc[-1] - df[state].iloc[50]) / df[state].iloc[50]*1. * 100
ddf=pd.DataFrame(delta, index=["delta"]).transpose()
ddf = ddf.sort_values(["delta"], ascending=False)  # .sort() was removed in newer pandas
ddf.head()
ddp=pd.DataFrame(rel_delta, index=["% change"]).transpose()
ddp = ddp.sort_values(["% change"], ascending=False)
ddp.head()
ddp.tail(n=10)
from sklearn import tree
import numpy as np
wine = np.loadtxt("wine.data", delimiter=',')
#Get the targets (first column of file)
Y = wine[:, 0]
#Remove targets from input data
X = wine[:, 1:]
#lets split into a test and training set
from sklearn.cross_validation import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=9)
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, Y_train)
clf.score(X_test, Y_test)
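# Optional robustness check (added): cross-validated accuracy using the
# era-appropriate sklearn.cross_validation module
from sklearn.cross_validation import cross_val_score
print(cross_val_score(tree.DecisionTreeClassifier(), X, Y, cv=5).mean())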
import matplotlib.pyplot as plt
%matplotlib inline
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(3)
plt.xticks(tick_marks, ["1", "2", "3"], rotation=45)
plt.yticks(tick_marks, ["1", "2", "3"])
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
from sklearn.metrics import confusion_matrix
y_true = Y_test
y_pred = clf.predict(X_test)
cm = confusion_matrix(y_true, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
plt.show()
odf = pd.read_csv("hmwk_1_data/AHW_1.csv")
odf.head()
odf["Age"].plot(kind="hist")
odf["Age"].describe()
odf["Weight"].plot(kind="hist")
odf["Weight"].describe()
odf["Height"].plot(kind="hist")
odf["Height"].describe()
odf.isnull().sum()
male = odf["Sex"]=="M"
female = odf["Sex"]=="F"
odf[male]["Age"].plot(kind="hist")
odf[female]["Age"].plot(kind="hist")
odf[male]["Weight"].plot(kind="hist")
odf[female]["Weight"].plot(kind="hist")
odf[male]["Height"].plot(kind="hist")
odf[female]["Height"].plot(kind="hist")
odf.describe()
from pandas.tools.plotting import scatter_matrix
pd.scatter_matrix(odf, alpha=0.2, figsize=(10, 10), diagonal='kde')
odf["lbs"] = odf["Weight"] * 2.20462
odf.head()
pd.scatter_matrix(odf, alpha=0.2, figsize=(10, 10), diagonal='kde')
odf["w+h"] = odf["Weight"] + odf["Height"]
odf.drop('lbs', axis=1, inplace=True)
odf.head()
pd.scatter_matrix(odf, alpha=0.2, figsize=(10, 10), diagonal='kde')
odf["BMI"] = odf["Weight"] / ((odf["Height"]*0.01)**2)
odf.head()
odf[male]["BMI"].plot(kind="hist")
odf[female]["BMI"].plot(kind="hist")
print(odf[male]["BMI"].describe())
print()
print(odf[female]["BMI"].describe())
sports = list(set(odf["Sport"]))
sports
# choose 3 random sports
import random
random_sports = random.sample(sports, 3)
for sport in random_sports:
sport_selector = odf["Sport"] == sport
odf[sport_selector].plot(kind="scatter", x="Height", y="Weight", marker='x')
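# A compact alternative to per-sport scatter plots (a sketch using only the
# columns defined above): group the frame by sport and compare mean
# height/weight across all sports at once.
odf.groupby("Sport")[["Height", "Weight"]].mean().sort_values("Height", ascending=False).head(10)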
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: pandas
Step2: blaze
Step3: bquery without caching
Step4: bquery with caching
Step5: Running Times Summary
Step6: Graphic Summary
Step7: The light yellow segment shows the one-time factorization caching cost, which after the first run can be left out of future queries.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import itertools as itt
import time
import shutil
import os
import contextlib
import pandas as pd
import blaze as blz
import bquery
import cytoolz
from cytoolz.curried import pluck as cytoolz_pluck
from collections import OrderedDict
import copy
from prettyprint import pp
elapsed_times = OrderedDict()
@contextlib.contextmanager
def ctime(message=None):
"Counts the time spent in some context"
assert message is not None
global elapsed_times
t_elapsed = 0.0
print('\n')
t = time.time()
yield
if message:
print message + ": ",
t_elapsed = time.time() - t
print round(t_elapsed, 4), "sec"
elapsed_times[message] = t_elapsed
ga = itt.cycle(['ES', 'NL'])
gb = itt.cycle(['b1', 'b2', 'b3', 'b4', 'b5'])
gx = itt.cycle([1, 2])
gy = itt.cycle([-1, -2])
rootdir = 'bench-data.bcolz'
if os.path.exists(rootdir):
shutil.rmtree(rootdir)
n_rows = 1000000
# -- data
z = np.fromiter(((a, b, x, y) for a, b, x, y in itt.izip(ga, gb, gx, gy)),
dtype='S2,S2,i8,i8', count=n_rows)
ct = bquery.ctable(z, rootdir=rootdir)
ct.flush()
print('Simple Test Case')
df = pd.DataFrame(z)
with ctime(message='pandas'):
result = df.groupby(['f0'], sort=False, as_index=False)['f2'].sum()
# print(result)
print('Simple Test Case')
blaze_data = blz.Data(ct.rootdir)
expr = blz.by(blaze_data.f0, sum_f2=blaze_data.f2.sum())
with ctime(message='blaze (pandas + bcolz)'):
result = blz.compute(expr)
# print result
print('Simple Test Case')
with ctime(message='bquery + bcolz'):
result = ct.groupby(['f0'], ['f2'])
# print(result)
print('Simple Test Case')
with ctime(message='bquery, create factorization cache'):
ct.cache_factor(['f0'], refresh=True)
with ctime(message='bquery + bcolz (fact. cached)'):
result = ct.groupby(['f0'], ['f2'])
# print(result)
print('Simple Test Case Running Time')
elapsed_times_bak = OrderedDict(sorted(elapsed_times.iteritems()))
pp(elapsed_times_bak)
print('Simple Test Case Running Time relative to Pandas')
elapsed_times_bak = OrderedDict(sorted(elapsed_times.iteritems()))
pp(elapsed_times_bak)
elapsed_times = elapsed_times_bak
elapsed_times_norm = OrderedDict((k, v / elapsed_times['pandas']) for (k, v) in sorted(elapsed_times.iteritems()))
print '\nNormalized running time'
pp(elapsed_times_norm)
if 'bquery, create factorization cache' in elapsed_times_norm:
base_bquery = elapsed_times_norm.pop('bquery, create factorization cache')
labels = []
val = []
for k,v in sorted(elapsed_times_norm.iteritems(), reverse=True):
labels.append(k)
val.append(v)
pos = np.arange(len(elapsed_times_norm))+.5 # the bar centers on the y axis
print elapsed_times_norm.keys()
plt.figure(1, figsize=[15,5])
plt.grid(True)
plt.barh(pos,val, align='center')
plt.barh(pos,[0, base_bquery, 0,0],
left=[0, elapsed_times_norm['bquery + bcolz (fact. cached)'], 0, 0],
align='center', color = '#FFFFCC')
plt.yticks(pos, labels, fontsize=15)
plt.xlabel('X times slower', fontsize=15)
plt.title('Performance compared to pandas', fontsize=25)
elapsed_times = OrderedDict()
ga = itt.cycle(['ES', 'NL'])
gb = itt.cycle(['b1', 'b2', 'b3', 'b4'])
gc = itt.cycle([1, 2])
gd = itt.cycle([3, 4, 4, 3])
ge = itt.cycle(['c','d','e'])
gx = itt.cycle([1, 2])
gy = itt.cycle([-1, -2])
gz = itt.cycle([1.11, 2.22, 3.33, 4.44, 5.55])
rootdir = 'bench-data.bcolz'
if os.path.exists(rootdir):
shutil.rmtree(rootdir)
n_rows = 1000000
print('Rows: ', n_rows)
z = np.fromiter(((a, b, c, d, e, x, y, z) for a, b, c, d, e, x, y, z
in itt.izip(ga, gb, gc, gd, ge, gx, gy, gz)),
dtype='S2,S2,i4,i8,S1,i4,i8,f8', count=n_rows)
ct = bquery.ctable(z, rootdir=rootdir, )
# -- pandas --
df = pd.DataFrame(z)
with ctime(message='pandas'):
result = df.groupby(['f0','f1','f2','f3','f4'], sort=False, as_index=False)['f5','f6','f7'].sum()
# print(result)
# -- bquery --
with ctime(message='bquery + bcolz'):
result = ct.groupby(['f0','f1','f2','f3','f4'], ['f5','f6','f7'])
# print(result)
with ctime(message='bquery, create factorization cache'):
ct.cache_factor(['f0','f1','f2','f3','f4'], refresh=True)
with ctime(message='bquery over bcolz (factorization cached)'):
result = ct.groupby(['f0','f1','f2','f3','f4'], ['f5','f6','f7'])
# print(result)
print('Complex Test Case Running Time relative to Pandas')
elapsed_times_bak = OrderedDict(sorted(elapsed_times.iteritems()))
pp(elapsed_times_bak)
elapsed_times = elapsed_times_bak
elapsed_times_norm = OrderedDict((k, v / elapsed_times['pandas']) for (k, v) in sorted(elapsed_times.iteritems()))
print '\nNormalized running time'
pp(elapsed_times_norm)
if 'bquery, create factorization cache' in elapsed_times_norm:
base_bquery = elapsed_times_norm.pop('bquery, create factorization cache')
labels = []
val = []
for k,v in sorted(elapsed_times_norm.iteritems(), reverse=True):
labels.append(k)
val.append(v)
pos = np.arange(len(elapsed_times_norm))+.5 # the bar centers on the y axis
print elapsed_times_norm.keys()
plt.figure(1, figsize=[15,5])
plt.grid(True)
plt.barh(pos,val, align='center')
plt.barh(pos,[0, base_bquery, 0],
left=[0, elapsed_times_norm['bquery over bcolz (factorization cached)'], 0],
align='center', color = '#FFFFCC')
plt.yticks(pos, labels, fontsize=15)
plt.xlabel('X times slower', fontsize=15)
plt.title('Performance compared to pandas', fontsize=25)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the Dataset
Step2: Lalonde Dataset
Step3: Step 1
Step4: Lalonde
Step5: Step 2
Step6: Lalonde
Step7: Step 3
Step8: Lalonde
Step9: Step 4
Step10: Replace Treatment with Placebo
Step11: Remove Random Subset of Data
Step12: Lalonde
Step13: Replace Treatment with Placebo
Step14: Remove Random Subset of Data
|
<ASSISTANT_TASK:>
Python Code:
import dowhy
from dowhy import CausalModel
import pandas as pd
import numpy as np
# Config dict to set the logging level
import logging.config
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'loggers': {
'': {
'level': 'WARN',
},
}
}
logging.config.dictConfig(DEFAULT_LOGGING)
# Disabling warnings output
import warnings
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
data = pd.read_csv("https://raw.githubusercontent.com/AMLab-Amsterdam/CEVAE/master/datasets/IHDP/csv/ihdp_npci_1.csv", header = None)
col = ["treatment", "y_factual", "y_cfactual", "mu0", "mu1"]
for i in range(1,26):
col.append("x"+str(i))
data.columns = col
data = data.astype({"treatment":'bool'}, copy=False)
data.head()
from rpy2.robjects import r as R
from os.path import expanduser
home = expanduser("~")
%reload_ext rpy2.ipython
# %R install.packages("Matching")
%R library(Matching)
%R data(lalonde)
%R -o lalonde
lalonde = lalonde.astype({'treat':'bool'}, copy=False)
lalonde.head()
# Create a causal model from the data and given common causes
common_causes = []
for i in range(1, 26):
common_causes += ["x"+str(i)]
ihdp_model = CausalModel(
data=data,
treatment='treatment',
outcome='y_factual',
common_causes=common_causes
)
ihdp_model.view_model(layout="dot")
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
lalonde_model = CausalModel(
data=lalonde,
treatment='treat',
outcome='re78',
common_causes='nodegr+black+hisp+age+educ+married'.split('+')
)
lalonde_model.view_model(layout="dot")
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
#Identify the causal effect for the ihdp dataset
ihdp_identified_estimand = ihdp_model.identify_effect(proceed_when_unidentifiable=True)
print(ihdp_identified_estimand)
#Identify the causal effect for the lalonde dataset
lalonde_identified_estimand = lalonde_model.identify_effect(proceed_when_unidentifiable=True)
print(lalonde_identified_estimand)
ihdp_estimate = ihdp_model.estimate_effect(
ihdp_identified_estimand,
method_name="backdoor.propensity_score_weighting"
)
print("The Causal Estimate is " + str(ihdp_estimate.value))
lalonde_estimate = lalonde_model.estimate_effect(
lalonde_identified_estimand,
method_name="backdoor.propensity_score_weighting"
)
print("The Causal Estimate is " + str(lalonde_estimate.value))
ihdp_refute_random_common_cause = ihdp_model.refute_estimate(
ihdp_identified_estimand,
ihdp_estimate,
method_name="random_common_cause"
)
print(ihdp_refute_random_common_cause)
ihdp_refute_placebo_treatment = ihdp_model.refute_estimate(
ihdp_identified_estimand,
ihdp_estimate,
method_name="placebo_treatment_refuter",
placebo_type="permute"
)
print(ihdp_refute_placebo_treatment)
ihdp_refute_random_subset = ihdp_model.refute_estimate(
ihdp_identified_estimand,
ihdp_estimate,
method_name="data_subset_refuter",
subset_fraction=0.8
)
print(ihdp_refute_random_subset)
lalonde_refute_random_common_cause = lalonde_model.refute_estimate(
lalonde_identified_estimand,
lalonde_estimate,
method_name="random_common_cause"
)
print(lalonde_refute_random_common_cause)
lalonde_refute_placebo_treatment = lalonde_model.refute_estimate(
lalonde_identified_estimand,
lalonde_estimate,
method_name="placebo_treatment_refuter",
placebo_type="permute"
)
print(lalonde_refute_placebo_treatment)
lalonde_refute_random_subset = lalonde_model.refute_estimate(
lalonde_identified_estimand,
lalonde_estimate,
method_name="data_subset_refuter",
subset_fraction=0.9
)
print(lalonde_refute_random_subset)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As with the one-dimensional problem before, we can still do linear regression, except now we have two input variables and therefore two weights as well. Let's denote the input variables as $x_1$ and $x_2$, and instead of using $m$ as the coefficient variable, let's use $w_1$ and $w_2$. So for linear regression, we would have the following function
Step2: Recall that we can evaluate the quality of our predictions using the sum-squared error function.
Step3: Given a dataset, we can use this setup to do linear regression, just as we did in the case of the one-dimensional dataset in the previous section. In this case, we are no longer able to visualize the loss surface, because our function has 3 parameters ($w_1$, $w_2$, and $b$), and plotting the loss surface with respect to the parameters would therefore require four dimensions. But the regression works just the same way; we can optimize our three parameters using calculus, by finding the partial derivative of the cost function with respect to each parameter.
Step4: A nice thing to do is to visualize our function. We can plot the prediction surface, $f(X)$ as a function of $X=\left[x_1,x_2\right]$ within some range. We do that in the next cell
Step5: We found a respectable cost applying linear regression to our dataset. But we can still do better. The problem with linear regression is the linearity. Sometimes, data is not so nicely shaped that a plane can fit it well. Real-world datasets are much more often irregularly shaped, with data being distributed along apparently curved surfaces.
Step6: Basically, a sigmoid just squashes any input it gets to between 0 and 1.
Step7: That might look much worse, but we haven't optimized yet!
Step8: The function we posed above can already be considered a neural network. But let's complicate things a bit further, by adding a hidden layer. Neurons can be arranged in layers. So instead of having just two input neurons and an output neuron, let's place a layer of three neurons in the middle.
Step9: For simplicity, let's just keep all the biases $b$ fixed to 0 for now. We'll bring them back later.
Step10: Let's turn this basic operation into a class.
Step11: Instantiate a neural network with 2 input neurons, 3 hidden neurons, and 1 output neuron.
Step12: Now to do a forward pass, we can simply run the networks predict function
Step13: Gradient descent
Step14: The above function get_gradient calculates the gradient of a 2-layer network net, given a dataset X, y. In the next cell, we go through the learning procedure: we load our dataset, instantiate a neural network, and train it on the data using the gradient function defined above.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
iris = load_iris()
data, labels = iris.data[:,0:2], iris.data[:,2]
num_samples = len(labels) # size of our dataset
# shuffle the dataset
shuffle_order = np.random.permutation(num_samples)
data = data[shuffle_order, :]
labels = labels[shuffle_order]
def weighted_sum(x, w, b):
return b + np.dot(w, x)
# set our paramters
w = [0.2, 0.6]
b = -0.3
# for example, let's use the first data point
X, y = data, labels
pred_y = [weighted_sum(x, w, b) for x in X]
# let's print out the first prediction
print("for x=[%0.2f, %0.2f], predicted = %0.2f, actual = %0.2f" % (X[0][0], X[0][1], pred_y[0], y[0]))
# sum squared error
def cost(y_pred, y_actual):
return 0.5 * np.sum((y_actual-y_pred)**2)
error = cost(pred_y, y)
print(error)
import random
# grab our data
X, y = data, labels
# always a good idea to normalize
X = X / np.amax(X, axis=0)
y = y / np.amax(y, axis=0)
# choose a random initial m, b
w, b = [random.random(), random.random()], random.random()
# our function w1 * x1 + w2 * x2 + b
def F(X, w, b):
return np.sum(w*X, axis=1) + b
# what is our error?
y_pred = F(X, w, b)
init_cost = cost(y_pred, y)
print("initial parameters: w1=%0.3f, w2=%0.3f, b=%0.3f"%(w[0], w[1], b))
print("initial cost = %0.3f" % init_cost)
# implement partial derivatives of our parameters
def dJdw1(X, y, w, b):
return -np.dot(X[:,0], y - F(X, w, b))
def dJdw2(X, y, w, b):
return -np.dot(X[:,1], y - F(X, w, b))
def dJdb(X, y, w, b):
return -np.sum(y - F(X, w, b))
# choose the alpha parameter and number of iterations
alpha = 0.001
n_iters = 2000
# run through gradient descent
errors = []
for i in range(n_iters):
w[0] = w[0] - alpha * dJdw1(X, y, w, b)
w[1] = w[1] - alpha * dJdw2(X, y, w, b)
b = b - alpha * dJdb(X, y, w, b)
y_pred = F(X, w, b)
j = cost(y_pred, y)
errors.append(j)
# plot the error
plt.figure(figsize=(16, 3))
plt.plot(range(n_iters), errors, linewidth=2)
plt.title("Cost by iteration")
plt.ylabel("Cost")
plt.xlabel("iterations")
# what is our final error rate
y_pred = F(X, w, b)
final_cost = cost(y_pred, y)
print("final parameters: w1=%0.3f, w2=%0.3f, b=%0.3f"%(w[0], w[1], b))
print("final cost = %0.3f" % final_cost)
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
x1, x2 = np.meshgrid(np.arange(-10, 10, 1), np.arange(-10, 10, 1))
y = b + w[0]*x1 + w[1]*x2
ax.plot_surface(x1, x2, y, rstride=1, cstride=1, cmap=plt.cm.coolwarm, linewidth=0, antialiased=False)
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
x = np.arange(-10.0, 10.0, 0.2)
sig = sigmoid(x)
plt.plot(x, sig)
plt.title('Sigmoid function')
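# A property we rely on later for gradients: the sigmoid's derivative can be
# written in terms of its own output, d/dz sigmoid(z) = sigmoid(z) * (1 - sigmoid(z)).
plt.figure()
plt.plot(x, sig * (1 - sig))
plt.title('Sigmoid derivative')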
def weighted_sum(x, w, b):
return b + np.dot(w, x)
def sigmoid(z):
return 1 / (1 + np.exp(-z))
# reset our parameters
w = [0.2, 0.6]
b = -0.3
X, y = data, labels
# get weighted sum like before
Z = [weighted_sum(x, w, b) for x in X]
# now transform the weighted sums with a sigmoid
y_pred = [sigmoid(z) for z in Z]
# evaluate error
error = cost(y_pred, y)
print(error)
fig = plt.figure()
ax = fig.gca(projection='3d')
x1, x2 = np.meshgrid(np.arange(-10, 10, 1), np.arange(-10, 10, 1))
y = sigmoid(b + w[0]*x1 + w[1]*x2)
ax.plot_surface(x1, x2, y, rstride=1, cstride=1, cmap=plt.cm.coolwarm, linewidth=0, antialiased=False)
W1 = np.random.randn(2, 3)
W2 = np.random.randn(3, 1)
print("W1=", W1)
print("W2=", W2)
X, y = data, labels
# first layer weighted sum z
z = np.dot(X, W1)
# project z through non-linear sigmoid
z = sigmoid(z)
# do another dot product at end (sigmoid is omitted)
y_pred = np.dot(z, W2).flatten()  # flatten (n, 1) -> (n,) so the cost broadcasts against y correctly
# what is our cost
error = cost(y_pred, y)
print('predicted %0.2f for example 0, actual %0.2f, total cost %0.2f'%(y_pred[0], y[0], error))
class Neural_Network(object):
def __init__(self, n0, n1, n2):
self.n0 = n0
self.n1 = n1
self.n2 = n2
# initialize weights
self.W1 = np.random.randn(self.n0, self.n1)
self.W2 = np.random.randn(self.n1 ,self.n2)
def predict(self, x):
z = np.dot(x, self.W1)
z = sigmoid(z)
y = np.dot(z, self.W2)
return y
net = Neural_Network(2, 3, 1)
X, y = data, labels.reshape(-1, 1)  # column vector so shapes match the network's (n, 1) output
y_pred = net.predict(X)
error = cost(y_pred, y)
print('predicted %0.2f for example 0, actual %0.2f, total cost %0.2f'%(y_pred[0, 0], y[0, 0], error))
import itertools
def get_gradient(net, X, y):
w_delta = 1e-8
# get the current value of the loss, wherever the parameters are
y_pred_current = net.predict(X)
error_current = cost(y_pred_current, y)
# grab the current weights and copy them (so we can restore them after modification)
dw1, dw2 = np.zeros((net.n0, net.n1)), np.zeros((net.n1, net.n2))
W1, W2 = np.copy(net.W1), np.copy(net.W2)
# for the first layer, iterate through each weight,
# perturb it slightly, and calculate the numerical
# slope between that loss and the original loss
for i, j in itertools.product(range(net.n0), range(net.n1)):
net.W1 = np.copy(W1)
net.W1[i][j] += w_delta
y_pred = net.predict(X)
error = cost(y_pred, y)
dw1[i][j] = (error - error_current) / w_delta
# do the same thing for the second layer
for i, j in itertools.product(range(net.n1), range(net.n2)):
net.W2 = np.copy(W2)
net.W2[i][j] += w_delta
y_pred = net.predict(X)
error = cost(y_pred, y)
dw2[i][j] = (error - error_current) / w_delta
# restore the original weights
net.W1, net.W2 = np.copy(W1), np.copy(W2)
return dw1, dw2
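# The finite-difference gradient above is simple but slow (one forward pass per
# weight). A sketch of the analytic gradient (backpropagation) for this exact
# architecture, y_pred = sigmoid(X W1) W2 with cost 0.5 * sum((y_pred - y)^2);
# it assumes y is an (n, 1) column vector, as in the training cell below.
def get_gradient_analytic(net, X, y):
    z = sigmoid(np.dot(X, net.W1))               # hidden activations, shape (n, n1)
    y_pred = np.dot(z, net.W2)                   # output, shape (n, n2)
    delta = y_pred - y                           # dJ/dy_pred
    dw2 = np.dot(z.T, delta)                     # gradient w.r.t. second-layer weights
    dz = np.dot(delta, net.W2.T) * z * (1 - z)   # backprop through the sigmoid
    dw1 = np.dot(X.T, dz)                        # gradient w.r.t. first-layer weights
    return dw1, dw2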
# load the data and labels
X, y = data, labels.reshape((len(labels),1))
# it's always a good idea to normalize the data between 0 and 1
X = X/np.amax(X, axis=0)
y = y/np.amax(y, axis=0)
# create a 2x3x1 neural net
net = Neural_Network(2, 3, 1)
# what is the current cost?
y_orig = net.predict(X)
init_cost = cost(y_orig, y)
print("initial cost = %0.3f" % init_cost)
# Set the learning rate, and how many epochs (updates) to try
n_epochs = 2000
learning_rate = 0.01
# for each epoch, calculate the gradient, then subtract it from the parameters, and save the cost
errors = []
for i in range(n_epochs):
dw1, dw2 = get_gradient(net, X, y)
net.W1 = net.W1 - learning_rate * dw1
net.W2 = net.W2 - learning_rate * dw2
y_pred = net.predict(X)
error = cost(y_pred, y)
errors.append(error)
# plot it
plt.plot(range(0, len(errors)), errors, linewidth=2)
# what is the final cost?
y_pred = net.predict(X)
final_cost = cost(y_pred, y)
print("final cost = %0.3f" % final_cost)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Uncertainty and Modelling
Step2: Scatter plots
Step3: You might be tempted to plot a histogram of the model outputs. This shows how often a particular value occurs in the results, but given that we are only exploring the model variable ranges, don't read too much into this distribution.
Step4: Using SALib to run a Global Sensitivity Analysis
Step5: Define a problem file
Step6: Generate a Sample
Step7: Ranking
Step8: We can plot the results
Step9: A More Complicated Example
Step10: Screening
Step11: The results should look something like this
|
<ASSISTANT_TASK:>
Python Code:
from ipywidgets import widgets, interact
from IPython.display import display
import seaborn as sbn
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from IPython.core.pylabtools import figsize
sbn.set_context("talk", font_scale=.8)
figsize(10, 8)
# The model used for this seminar is contained in the file model.py
from model import (cost_of_vehicle_to_grid, compute_profit,
annualized_capital_cost, battery_lifetime,
max_vehicle_power)
# Uncomment the next line and run this cell to view the model code in this notebook
# %load model.py
@interact(connector=widgets.FloatSlider(value=2.3, min=2.3, max=22, step=0.5),
battery_size=widgets.FloatSlider(value=24, min=10, max=100, step=5),
distance_driven=widgets.FloatSlider(value=0, min=0, max=100, step=5),
range_buffer=widgets.FloatSlider(value=0, min=0, max=100, step=10),
dispatch_time=widgets.FloatSlider(value=1.4, min=0.5, max=24, step=0.5))
def plot_power(connector: float, battery_size: float, distance_driven: float,
range_buffer: float, dispatch_time: float) -> float :
power = max_vehicle_power(connector,
battery_size,
distance_driven,
range_buffer,
dispatch_time
)
return print("The maximum power is {} kW".format(round(power, 2)))
def monte_carlo_large(data):
    # rows of `data`: connector, battery size, distance driven, range buffer,
    # driving efficiency, inverter efficiency, dispatch time
    y = max_vehicle_power(data[0], data[1], data[2], data[3], data[6], data[4], data[5])
    return y
number_sims = 1000
# Make some random data in the correct ranges
mc_connector = np.random.uniform(2.3, 22, number_sims)
mc_battery_size = np.random.uniform(50, 100, number_sims)
mc_distance_driven = np.random.uniform(0, 80, number_sims)
mc_range_buffer = np.random.uniform(0, 80, number_sims)
mc_driving_eff = np.random.uniform(2, 6, number_sims)
mc_inv_eff = np.random.uniform(0.87, 0.97, number_sims)
mc_dispatch_time = np.random.uniform(0.5, 24, number_sims)
data = np.array((mc_connector,
mc_battery_size,
mc_distance_driven,
mc_range_buffer,
mc_driving_eff,
mc_inv_eff,
mc_dispatch_time))
# Run the code
y = monte_carlo_large(data)
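# Summarize the spread of the Monte Carlo outputs (a sketch): percentiles give
# a quick sense of the uncertainty without over-interpreting the distribution.
p5, p50, p95 = np.percentile(y, [5, 50, 95])
print("Max power: 5th=%0.2f kW, median=%0.2f kW, 95th=%0.2f kW" % (p5, p50, p95))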
# Make some scatter plots to compare the results
plt.subplot(241)
plt.scatter(mc_connector, y)
plt.title("Connector size (kW)")
plt.ylabel("Max Power (kW)")
plt.subplot(242)
plt.scatter(mc_battery_size, y)
plt.title("Battery Size (kWh)")
# plt.ylabel("Max Power (kW)")
plt.subplot(243)
plt.scatter(mc_distance_driven, y)
plt.title("Distance Driven (km)")
# plt.ylabel("Max Power (kW)")
plt.subplot(244)
plt.scatter(mc_range_buffer, y)
plt.title("Range Buffer (km)")
# plt.ylabel("Max Power (kW)")
plt.subplot(245)
plt.scatter(mc_driving_eff, y)
plt.title("Driving Eff (kWh/km)")
plt.ylabel("Max Power (kW)")
plt.subplot(246)
plt.scatter(mc_inv_eff, y)
plt.title("Inverter Eff (%)")
# plt.ylabel("Max Power (kW)")
plt.subplot(247)
plt.scatter(mc_dispatch_time, y)
plt.title("Dispatch Time (hours)")
# plt.ylabel("Max Power (kW)")
plt.tight_layout()
# plt.savefig('scatter.png')
plt.hist(y)
plt.xlabel("Power (kW)")
plt.ylabel("Frequency")
from SALib.sample import morris as ms
from SALib.analyze import morris as ma
from SALib.plotting import morris as mp
morris_problem = {
# There are n variables
'num_vars': 7,
# These are their names
'names': ['conn', 'batt', 'dist', 'range',
'dri_eff', 'inv_eff', 'dispatch_time'],
# Plausible ranges over which we'll move the variables
'bounds': [[2.3, 22], # connection_power (kW)
[50, 100], # battery size (kWh)
[0, 80], # distance driven (km)
[0, 80], # range buffer (km)
[4,5.5], # driving efficiency (kWh/km)
[0.87,0.97], # inverter efficienct (%)
[0.5, 24] # dispatch time - hours of the day in which
# the energy is dispatched
],
# I don't want to group any of these variables together
'groups': None
}
number_of_trajectories = 1000
sample = ms.sample(morris_problem, number_of_trajectories, num_levels=4)
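# Sanity-check the sample size: each Morris trajectory costs (num_vars + 1)
# model runs, so we expect number_of_trajectories * (num_vars + 1) rows.
expected_rows = number_of_trajectories * (morris_problem['num_vars'] + 1)
print(sample.shape, "expected rows:", expected_rows)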
# Run the sample through the monte carlo procedure of the power model
output = monte_carlo_large(sample.T)
# Store the results for plotting of the analysis
Si = ma.analyze(morris_problem, sample, output, print_to_console=False)
print("{:20s} {:>7s} {:>7s} {:>7s}".format("Name", "mu", "mu_star", "sigma"))
for name, s1, st, mean in zip(morris_problem['names'],
Si['mu'],
Si['mu_star'],
Si['sigma']):
print("{:20s} {:=7.2f} {:=7.2f} {:=7.2f}".format(name, s1, st, mean))
fig, (ax1, ax2) = plt.subplots(1,2)
mp.horizontal_bar_plot(ax1, Si, {})
mp.covariance_plot(ax2, Si, {})
@interact(battery_size=widgets.FloatSlider(value=24, min=10, max=100, step=5),
battery_unit_cost=widgets.FloatSlider(value=350, min=100, max=400, step=50),
connector_power=widgets.FloatSlider(value=2.3, min=2.3, max=22, step=0.5),
lifetime_cycles=widgets.FloatSlider(value=2000, min=1000, max=10000, step=1000),
depth_of_discharge=widgets.FloatSlider(value=0.8, min=0.5, max=1.0, step=0.1),
electricity_price=widgets.FloatSlider(value=0.1, min=0.01, max=0.5, step=0.01),
purchased_energy_cost=widgets.FloatSlider(value=0.1, min=0.01, max=0.5, step=0.01),
capacity_price=widgets.FloatSlider(value=0.007, min=0.001, max=0.01, step=0.001),
round_trip_efficiency=widgets.FloatSlider(value=0.73, min=0.50, max=1.0, step=0.01),
cost_of_v2g_equip=widgets.FloatSlider(value=2000, min=100, max=5000, step=100),
discount_rate=widgets.FloatSlider(value=0.10, min=0.0, max=0.2, step=0.01),
economic_lifetime=widgets.FloatSlider(value=10, min=3, max=25, step=1),
ratio_dispatch_to_contract=widgets.FloatSlider(value=0.10, min=0.01, max=0.50, step=0.01),
distance_driven=widgets.FloatSlider(value=0, min=0, max=100, step=5),
range_buffer=widgets.FloatSlider(value=0, min=0, max=100, step=10),
hours_connected_per_day=widgets.FloatSlider(value=18, min=0.5, max=24, step=0.5))
def plot_profit(battery_size,
battery_unit_cost,
connector_power,
lifetime_cycles,
depth_of_discharge,
electricity_price,
purchased_energy_cost,
capacity_price,
round_trip_efficiency,
cost_of_v2g_equip,
discount_rate,
economic_lifetime,
distance_driven,
range_buffer,
ratio_dispatch_to_contract,
hours_connected_per_day):
profit, revenue, cost = compute_profit(battery_size,
battery_unit_cost,
connector_power,
lifetime_cycles,
depth_of_discharge,
electricity_price,
purchased_energy_cost,
capacity_price,
round_trip_efficiency,
cost_of_v2g_equip,
discount_rate,
economic_lifetime,
distance_driven,
range_buffer,
ratio_dispatch_to_contract,
hours_connected_per_day
)
return print("Profit £{} = £{} - £{}".format(np.round(profit,2), np.round(revenue, 2), np.round(cost,2) ))
from SALib.sample.saltelli import sample as ss
from SALib.analyze.sobol import analyze as sa
problem = {
# There are sixteen variables
'num_vars': 16,
# These are their names
'names': ['battery_size',
'battery_unit_cost',
'connector_power',
'lifetime_cycles',
'depth_of_discharge',
'electricity_price',
'purchased_energy_cost',
'capacity_price',
'round_trip_efficiency',
'cost_of_v2g_equip',
'discount_rate',
'economic_lifetime',
'distance_driven',
'range_buffer',
'ratio_dispatch_to_contract',
'hours_connected_per_day'],
# These are their plausible ranges over which we'll move the variables
'bounds': [
[10, 100],
[100, 400],
[2.3, 22],
[1000, 10000],
[0.5, 1.0],
[0.01, 0.2],
[0.01, 0.2],
[0.001, 0.01],
[0.65, 1.0],
[100, 5000],
[0.0, 0.2],
[3, 25],
[0, 100],
[0, 100],
[0.01, 0.50],
[0.5, 24],
],
# I don't want to group any of these variables together
'groups': None
}
sample = ss(problem, 1000, calc_second_order=False)
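# Saltelli sampling with calc_second_order=False generates N * (num_vars + 2)
# rows, so the model cost grows linearly with the number of inputs.
print(sample.shape)  # expect (1000 * (16 + 2), 16) = (18000, 16)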
profit, revenue, cost = compute_profit(sample[:, 0], sample[:, 1], sample[:, 2],
sample[:, 3], sample[:, 4], sample[:, 5],
sample[:, 6], sample[:, 7], sample[:, 8],
sample[:, 9], sample[:, 10], sample[:, 11],
sample[:, 12], sample[:, 13], sample[:, 14],
sample[:, 15])
SI = sa(problem, profit, parallel=True, n_processors=4, calc_second_order=False, print_to_console=False)
print("{:28s} {:>5s} {:>5s} {:>12s}".format("Name", "1st", "Total", "Mean of Input"))
for name, s1, st, mean in zip(problem['names'], SI['S1'], SI['ST'], sample.mean(axis=0)):
print("{:28s} {:=5.2f} {:=5.2f} ({:=12.2f})".format(name, s1, st, mean))
print("\nST sums to {:.2f}".format(SI['ST'].sum()))
@interact(battery_size=widgets.FloatSlider(value=70, min=10, max=100, step=5),
purchased_energy_cost=widgets.FloatSlider(value=0.1, min=0.01, max=0.5, step=0.01),
cost_of_v2g_equip=widgets.FloatSlider(value=2000, min=100, max=5000, step=100),
economic_lifetime=widgets.FloatSlider(value=10, min=3, max=25, step=1),
ratio_dispatch_to_contract=widgets.FloatSlider(value=0.10, min=0.01, max=0.50, step=0.01),
lifetime_cycles=widgets.FloatSlider(value=2000, min=1000, max=10000, step=500))
def plot_profit(battery_size,
purchased_energy_cost,
cost_of_v2g_equip,
economic_lifetime,
ratio_dispatch_to_contract,
lifetime_cycles):
profit, revenue, cost = compute_profit(lifetime_cycles=lifetime_cycles,
battery_size=battery_size,
purchased_energy_cost=purchased_energy_cost,
cost_of_v2g_equip=cost_of_v2g_equip,
economic_lifetime=economic_lifetime,
ratio_dispatch_to_contract=ratio_dispatch_to_contract
)
return print("Profit £{} = £{} - £{}".format(np.round(profit,2),
np.round(revenue, 2),
np.round(cost,2) ))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'test-institute-2', 'sandbox-2', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mpi-m', 'mpi-esm-1-2-hr', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
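# e.g. (hypothetical author, for illustration only):
# DOC.set_author("Jane Doe", "jane.doe@example.org")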
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
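# For example, a completed numeric property could look like the line below
# (hypothetical value in Kelvin, for illustration only):
# DOC.set_value(271.35)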
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Unit tests
Step9: Training the network
Step10: Check out your predictions
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
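# Quick shape check (an illustrative aside) to confirm the chronological split
print(train_features.shape, val_features.shape, test_features.shape)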
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
        #### Set self.activation_function to the sigmoid ####
        #
        # Note: in Python, you can define a function with a lambda expression,
        # as shown below.
        self.activation_function = lambda x : (1/(1+np.exp(-x))) # sigmoid: 1/(1 + exp(-x))
### If the lambda code above is not something you're familiar with,
        # You can uncomment the following three lines and put your
# implementation there instead.
#
#def sigmoid(x):
# return 0 # Replace 0 with your sigmoid calculation here
#self.activation_function = sigmoid
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
            # Hidden layer
hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
            # Output layer
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
#### Implement the backward pass here ####
### Backward pass ###
            # Output error
error = y - final_outputs # Output layer error is the difference between desired target and actual output.
            # Hidden layer's contribution to the error
hidden_error = np.dot(error, self.weights_hidden_to_output.T)
            # Backpropagated error terms
            output_error_term = error # the output activation is the identity f(x)=x, so its derivative is 1
hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)
# Weight step (input to hidden)
delta_weights_i_h += hidden_error_term * X[:, None]
# Weight step (hidden to output)
            delta_weights_h_o += output_error_term * hidden_outputs[:, None]
        # Update the weights with one averaged gradient descent step
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
        # Hidden layer
hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
        # Output layer
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output, np.array([[ 0.37275328],[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
out = network.run(inputs)
self.assertTrue(np.allclose(out, 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
import sys
### Set the hyperparameters here ###
# Suggested ranges: epochs 100-3000, hidden_nodes 10 to 56*2, learning rate 0.01-0.1
iterations = 4000
learning_rate = 0.8
hidden_nodes = 16
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
    X, y = train_features.loc[batch].values, train_targets.loc[batch]['cnt'] # .loc replaces the removed .ix indexer
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
## Plot the training and validation losses
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim(0,2)
## Plot predictions against the test data
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.loc[test_data.index]['dteday']) # .loc replaces the removed .ix indexer
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Wavelength Response
Step2: Run the SunPy calculation.
Step3: Plot the results against each other.
Step4: Now, do a "residual plot" of the differences between the two results.
Step5: Now, zooming in on the two spikes in the 335 and 304 $\mathrm{\mathring{A}}$ channels...
Step6: It looks like there is contamination from the 94 $\mathrm{\mathring{A}}$ channel in the 304 $\mathrm{\mathring{A}}$ channel and contamination from 131 $\mathrm{\mathring{A}}$ in the 335 $\mathrm{\mathring{A}}$ channel. Why? Is this a mistake or just something we haven't accounted for in the SunPy calculation?
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import sunpy.instr.aia
%matplotlib inline
data = np.loadtxt('../aia_sample_data/aia_wresponse_raw.dat')
channels = [94,131,171,193,211,304,335]
ssw_results = {}
for i in range(len(channels)):
ssw_results[channels[i]] = {'wavelength':data[:,0],
'response':data[:,i+1]}
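# Sanity check (an illustrative aside): each entry maps a channel, e.g.
# ssw_results[94], to its wavelength grid and SSW-computed response
print(ssw_results[94]['response'][:5])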
response = sunpy.instr.aia.Response(path_to_genx_dir='../ssw_aia_response_data/')
response.calculate_wavelength_response()
fig,axes = plt.subplots(3,3,figsize=(12,12))
for c,ax in zip(channels,axes.flatten()):
#ssw
ax.plot(ssw_results[c]['wavelength'],ssw_results[c]['response'],
color=response.channel_colors[c],label='ssw')
#sunpy
ax.plot(response.wavelength_response[c]['wavelength'],response.wavelength_response[c]['response'],
color=response.channel_colors[c],marker='.',ms=12,label='SunPy')
if c!=335 and c!=304:
ax.set_xlim([c-20,c+20])
ax.set_title('{} $\mathrm{{\mathring{{A}}}}$'.format(c),fontsize=20)
ax.set_xlabel(r'$\lambda$ ({0:latex})'.format(response.wavelength_response[c]['wavelength'].unit),fontsize=20)
ax.set_ylabel(r'$R_i(\lambda)$ ({0:latex})'.format(response.wavelength_response[c]['response'].unit),fontsize=20)
axes[0,0].legend(loc='best')
plt.tight_layout()
fig,axes = plt.subplots(3,3,figsize=(12,12),sharey=True,sharex=True)
for c,ax in zip(channels,axes.flatten()):
#ssw
ax2 = ax.twinx()
ssw_interp = ssw_results[c]['response']*response.wavelength_response[c]['response'].unit
delta_response = np.fabs(response.wavelength_response[c]['response'] - ssw_interp)/(ssw_interp)
ax.plot(response.wavelength_response[c]['wavelength'],delta_response,color=response.channel_colors[c])
ax2.plot(response.wavelength_response[c]['wavelength'],response.wavelength_response[c]['response'],
color='k',linestyle='--')
ax.set_title('{} $\mathrm{{\mathring{{A}}}}$'.format(c),fontsize=20)
ax.set_xlabel(r'$\lambda$ ({0:latex})'.format(response.wavelength_response[c]['wavelength'].unit),fontsize=20)
ax.set_ylabel(r'$\frac{|\mathrm{SSW}-\mathrm{SunPy}|}{\mathrm{SSW}}$',fontsize=20)
ax2.set_ylabel(r'$R_i(\lambda)$ ({0:latex})'.format(response.wavelength_response[c]['response'].unit))
ax.set_ylim([-1.1,1.1])
plt.tight_layout()
fig,axes = plt.subplots(1,2,figsize=(10,5))
for c,ax in zip([304,335],axes.flatten()):
#ssw
ax.plot(ssw_results[c]['wavelength'],ssw_results[c]['response'],
color=response.channel_colors[c],label='ssw')
#sunpy
ax.plot(response.wavelength_response[c]['wavelength'],response.wavelength_response[c]['response'],
color=response.channel_colors[c],marker='.',ms=12,label='SunPy')
if c==304:
ax.set_xlim([80,100])
if c==335:
ax.set_xlim([120,140])
ax.set_title('{} $\mathrm{{\mathring{{A}}}}$'.format(c),fontsize=20)
ax.set_xlabel(r'$\lambda$ ({0:latex})'.format(response.wavelength_response[c]['wavelength'].unit),fontsize=20)
ax.set_ylabel(r'$R_i(\lambda)$ ({0:latex})'.format(response.wavelength_response[c]['response'].unit),fontsize=20)
axes[0].legend(loc='best')
plt.tight_layout()
fig,axes = plt.subplots(1,2,figsize=(10,5))
for ax,c in zip(axes.flatten(),[304,335]):
wvl_response = response._channel_info[c]['effective_area']*response._calculate_system_gain(c)
ax.plot(response.wavelength_response[c]['wavelength'],
wvl_response,'--',color=response.channel_colors[c],label=r'EA from SSW')
ax.plot(response.wavelength_response[c]['wavelength'],
response.wavelength_response[c]['response'],'.',ms=12,color=response.channel_colors[c],
label=r'SunPy')
if c==304:
ax.set_xlim([80,100])
if c==335:
ax.set_xlim([120,140])
ax.set_title('{} $\mathrm{{\mathring{{A}}}}$'.format(c),fontsize=20)
ax.set_xlabel(r'$\lambda$ ({0:latex})'.format(response.wavelength_response[c]['wavelength'].unit),fontsize=20)
ax.set_ylabel(r'$R_i(\lambda)$ ({0:latex})'.format(response.wavelength_response[c]['response'].unit),fontsize=20)
axes[0].legend(loc='best')
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Example
Step4: Mixed Text
Step5: If the text contains snippets from different languages, the detector is able to find the most probable langauges used in the text.
Step6: To take a closer look, we can inspect the text line by line, notice that the confidence in the detection went down for the first line
Step7: Best Effort Strategy
Step8: In case, that the detection is not reliable even when we are using the best effort strategy, an exception UnknownLanguage will be thrown.
Step9: Such an exception may not be desirable especially for trivial cases like characters that could belong to so many languages.
Step10: Command Line
Step11: The subcommand detect tries to identify the language code for each line in a text file.
Step12: Supported Languages
|
<ASSISTANT_TASK:>
Python Code:
from polyglot.detect import Detector
arabic_text = u"""
أفاد مصدر امني في قيادة عمليات صلاح الدين في العراق بأن " القوات الامنية تتوقف لليوم
الثالث على التوالي عن التقدم الى داخل مدينة تكريت بسبب
انتشار قناصي التنظيم الذي يطلق على نفسه اسم "الدولة الاسلامية" والعبوات الناسفة
والمنازل المفخخة والانتحاريين، فضلا عن ان القوات الامنية تنتظر وصول تعزيزات اضافية ".
"""
detector = Detector(arabic_text)
print(detector.language)
mixed_text = u"""
China (simplified Chinese: 中国; traditional Chinese: 中國),
officially the People's Republic of China (PRC), is a sovereign state located in East Asia.
"""
for language in Detector(mixed_text).languages:
print(language)
for line in mixed_text.strip().splitlines():
print(line + u"\n")
for language in Detector(line).languages:
print(language)
print("\n")
detector = Detector("pizza")
print(detector)
print(Detector("4"))
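# An illustrative aside: in a script you would typically guard against this,
# e.g. (assuming the exception class lives in polyglot.detect.base, as in
# recent polyglot versions):
# from polyglot.detect.base import UnknownLanguage
# try:
#     print(Detector("4"))
# except UnknownLanguage as exc:
#     print("Could not detect a language:", exc)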
print(Detector("4", quiet=True))
!polyglot detect --help
!polyglot detect --input testdata/cricket.txt
from polyglot.utils import pretty_list
print(pretty_list(Detector.supported_languages()))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: These are the polarization states
Step2: Devices
Step3: Example 1) Check that the $|H\rangle$ state is normalized
Step4: To show more information on an object, use the question mark after the function or object
Step5: Example 2) Converting from ket to bra
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from qutip import *
H = Qobj([[1],[0]])
V = Qobj([[0],[1]])
P45 = Qobj([[1/np.sqrt(2)],[1/np.sqrt(2)]])
M45 = Qobj([[1/np.sqrt(2)],[-1/np.sqrt(2)]])
R = Qobj([[1/np.sqrt(2)],[-1j/np.sqrt(2)]])
L = Qobj([[1/np.sqrt(2)],[1j/np.sqrt(2)]])
V
def HWP(theta):
return Qobj([[np.cos(2*theta),np.sin(2*theta)],[np.sin(2*theta),-np.cos(2*theta)]]).tidyup()
def LP(theta):
return Qobj([[np.cos(theta)**2,np.cos(theta)*np.sin(theta)],[np.sin(theta)*np.cos(theta),np.sin(theta)**2]]).tidyup()
def QWP(theta):
return Qobj([[np.cos(theta)**2 + 1j*np.sin(theta)**2,
(1-1j)*np.sin(theta)*np.cos(theta)],
[(1-1j)*np.sin(theta)*np.cos(theta),
np.sin(theta)**2 + 1j*np.cos(theta)**2]]).tidyup()
QWP(np.pi/4)
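# A quick sanity check of the device matrices (an illustrative aside): a
# half-wave plate with its fast axis at pi/8 (22.5 degrees) should map
# horizontal polarization onto the +45 degree state
print(HWP(np.pi/8) * H == P45)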
H.dag()*H
np.sin?
psi = Qobj([[1+1j],[2-1j]])
psi
psi.dag()
psi.dag().dag()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Strings are delimited by ", but can also use '. This is useful because you can now use one set of quotes inside another, and it'll still be one big string.
Step2: Variables don't need to be given a type, as Python is dynamically-typed. That means if I wanted to reuse s as an integer, Python would have no issue with that.
Step3: Single-line comments use #.
Step4: This statement is equivalent to saying s = s + 2**3; it's just shorthand. It also works with the other arithmetic operators (-=, *=, /=, and so on); see the short aside after the increment example below.
Step5: The == operator is the comparison operator. Here, we also see Python's syntax for logical statements, which evaluate to the booleans True and False.
Step6: Strings can be concatenated using the + operator. The len() function returns the length of a string.
Step7: Python accesses elements in lists from 0, not from 1 as in Matlab or R. This will be familiar to C and Java users.
Step8: Lists have several methods ( count, sort, reverse, pop, insert, remove, ... ). Here are a few.
Step9: Any thoughts on why len() is a global function in Python, and not a method of the list object ?
Step10: Note the indentation. Loops in Python don't get delimited by brackets like in C or R. Each block gets its own indentation.
Step11: The keyword in can also be used to check whether something is in a container
Step12: If you wanted to loop by indexing the list, we can use range(), which, in its simplest ( single-argument ) form, returns a list from 0 to that element minus 1.
Step13: Another way to do this is the enumerate function
Step14: What about if statements ?
Step15: Notice how the contents of the while loop are indented, and then code that is outside the loop continues unindented below.
Step16: Here, we used range() with two arguments. In Python 2, it generates a list from the first argument to the second argument minus 1. In Python 3, it returns an immutable iterable, but you can cast it to a list by calling something like list(range(5)). Also, note that we can feed the print function several things to print, separated by a comma.
Step17: More Lists
Step18: What are the middle three elements ? Let's use the slice syntax.
Step19: We don't need to give a start or an end
Step20: Can we access the last element ? What about the last two ?
Step21: Earlier, we saw that range() can take two arguments
Step22: The third argument is the stride (step size); the same start:stop:stride pattern works in slices too.
Step23: What if I don't want to explicitly remember the size of the list ?
Step24: List arithmetic ?
Step25: Dictionaries
Step26: There are a couple of other built-in containers, like tuples and sets. I won't go into them here, plainly because I have to use them so rarely that it's not worth the time during the session. If you want to read up on them, the official Python documentation covers both; a two-line taste follows the dictionary example below.
Step27: Functions
Step28: Looks good. We've just defined a function that takes one argument, n, and returns something based on what n is. The Fibonacci function is quite particular because it calls itself ( recursion ), but it's a small, fun example, so why not.
Step29: Here, %d is a format code for integer. %f is for floating point numbers ( floats ), and %s is for strings. The examples below use the newer str.format() syntax instead, where {} placeholders handle the type for you.
Step30: Also worth knowing are \n and \t
Step31: File IO
Step32: Syntax Exercises
|
<ASSISTANT_TASK:>
Python Code:
print("He said, 'what ?'")
s = "This is a string."
print(s)
print(type(s))
print(len(s))
s = 42
print(s)
print(type(s))
print(s * 2)
print(s + 7)
# Neither statement modifies the variable.
s += 2**3 # s is being incremented by 2^3
print("Same as s = s + 2**3")
print(s)
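# The same shorthand exists for the other arithmetic operators
# (a small illustrative aside):
t = 10
t -= 1 # t = t - 1
t *= 2 # t = t * 2
print(t) # 18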
print(s == 42)
print(s == 50)
print(s > 10)
x = "Blah"
print(x + x)
print(len(x))
mylist = [1, 2.41341]
mylist.append("We can mix types !")
print(mylist)
print(type(mylist))
print(mylist, "\n")
print(mylist[0])
print(mylist[1])
print(mylist[2])
print("Length is {} long.\n".format(len(mylist)))
print("There are {} ones in this list.\n".format(mylist.count(1)))
mylist.reverse()
print("Reversed ! {}".format(mylist))
for i in mylist :
print(i)
print("Hello\n")
print("Finished")
from __future__ import braces # an Easter egg: this deliberately raises SyntaxError("not a chance")
print(1 in mylist)
print(2 in mylist)
for i in range(len(mylist)) :
print(i, mylist[i])
for index, value in enumerate(mylist) :
print("Element number {} in the list has the value {}".format(index, value))
x = 5
if x > 3 :
print("x is greater than 3.")
elif x == 5 :
print("We aren't going to see this. Why ?")
else :
print("x is not greater than 3.")
print("We can see this, it's not in the if statement.")
for outer in range(1, 3) :
print("BIG CLICK, outer loop change to {}".format(outer))
for inner in range(4) :
print("*little click*, outer is still {}, and inner is {}.".format(outer, inner))
print("I'm done here.")
myint = 2
myfloat = 3.14
print(type(myint), type(myfloat))
# Multiplying an int with a float gives a float : the int was promoted.
print(myint * myfloat)
print(type(myint * myfloat))
# A minor difference between Python 2 and Python 3 :
print(7 / 3)
# Py2 : 2
# Py3 : 2.3333
# In Python 2, operations between same type gives the same type :
print(type(7 / 3))
# Py2 : <type 'int'>
# Py3 : <class 'float'>
# Quick hack with ints to floats - there's no need to typecast, just give it a float
print(float(7) / 3)
print(7 / 3.0)
# In Python 3, this is handled "correctly"; you can use // as integer division
print(7 // 3)
# Quick note for Py2 users - see https://www.python.org/dev/peps/pep-0238/
from __future__ import division
print(7 / 3)
# Create a list of integers 0, 1, 2, 3, 4
A = list(range(5))
print(A)
# Py2 vs Py3 :
# In Py2, range() returns a list already
# Let's replace the middle element
A[2] = "Naaaaah"
print(A)
print(A[1:4])
print(A[:2])
print(A[2:])
print(A[len(A)-2:])
print(A[-2:])
print(list(range(0, 5, 2)))
print(A[0:5:2])
# Here, it will give us elements 0, 2, 4.
# This will simply go from start to finish with a stride of 2
print(A[::2])
# And this one, from the second element to finish, with a stride of 2
print(A[1::2])
# So, uh... Reverse ?
print(A[::-1])
print(A + A)
print(A * 3)
pythonPoints = { "Quentin" : 1./3, "Paul" : 42, "Matthew" : 1e3 }
print(pythonPoints)
# Dictionaries associate keys with values
print(pythonPoints.keys())
print(pythonPoints.values())
# You can access them through their keys
print(pythonPoints["Paul"] * 2)
if "Ruthie" in pythonPoints : # for dicts, "in" checks the keys
print("Ruthie's here too !")
else :
pythonPoints["Ruthie"] = 0
print("Ruthie has {} mad skillz.".format(pythonPoints["Ruthie"]))
# Let's build a list of elements 0^2, 1^2, ..., 5^2
y = [i**2 for i in range(6)]
print(y)
# Want to keep your index ? Use a dictionary.
squares = { x : x**2 for x in range(6) }
for key, val in squares.items() :
print("{} squared is {}".format(key, val))
# Also useful : zip()
# for key, val in zip(squares.keys(), squares.values()) :
# print("{} : {}".format(key, val))
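# A minimal illustrative sketch (not from the original session): zip pairs
# up elements from two sequences, yielding tuples.
names = ["a", "b", "c"]
values = [1, 2, 3]
for name, value in zip(names, values):
    print("{} -> {}".format(name, value))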
# We can inline if statements too
print(42 if type(42) is int else 32)
# Note this is interpreted as
# print (something if a, else print something_else)
# and not
# (print something) if a, else (do something_else)
# Fibonacci numbers
# OH NO RECURSION
def fib(n) :
if n < 2 :
return n
else :
return fib(n-1) + fib(n-2)
print("Done defining.")
# Testing :
for i in range(10) :
print(fib(i))
def printFib(i) :
print("The {}th number of the Fibonnaci sequence is {}.".format(i, fib(i)))
printFib(20)
# I modified this one from Learn Python The Hard Way (highly recommended):
formatstring = "Start {} {}"
print(formatstring.format(formatstring, formatstring))
# Written on-the-fly, because I got mad skills
print("This is a haiku\n\tI'm awful at poetry\nWait, this really worked")
myfile = open("example.txt", "r")
for line in myfile :
print(line.strip("\n"))
# There are other options instead of looping over each line.
# You can instead use myfile.read().
# Writing : you can dump a variable using myfile.write()
# after having opened it in "w" mode.
# There are many other ways to read and write files,
# including ways to read and write CSV directly.
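# A minimal sketch of writing a file and reading it back (the filename
# "output.txt" is just an illustrative choice):
with open("output.txt", "w") as outfile:
    outfile.write("First line\nSecond line\n")
with open("output.txt", "r") as infile:
    print(infile.read())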
A = list(range(4, 29, 3))
print(A)
B = [a**2 for a in A]
print(B)
B += B[::-1]
print(B)
def addflip(mylist) :
squared = [element**2 for element in mylist]
return squared + squared[::-1]
print(addflip(range(5)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Filter the data
Step2: Note that we have reduced our matrix from 11,054 entries, including all the zeros, to only 1,397 entries now!
Step3: Note that this plot is skewed by a few outliers. Let's limit it to anything where read_depth < 1000 and redraw the plot
Step4: When we zoom in, this doesn't look like a strong correlation. Note that there are still a lot of data points here compared to the whole data set. In the data set excluding (0,0) we had 1,397 entries, and now we have 1,386 entries, so we only removed 11 values!
Step5: Removing the outliers
|
<ASSISTANT_TASK:>
Python Code:
#instantiate our environment
import os
import sys
%matplotlib inline
import pandas as pd
import statsmodels.api as sm
# read the data into a pandas dataframe
df = pd.read_csv("read_depth.strains.tsv", header=0, delimiter="\t")
print("Shape: {}".format(df.shape))
df.head()
dfa = df[(df["A_read_depth"] > 0) & (df["A_strains"] > 0)]
dfb = df[(df["B_read_depth"] > 0) & (df["B_strains"] > 0)]
dfc = df[(df["C_read_depth"] > 0) & (df["C_strains"] > 0)]
print("Shape: {}".format(dfa.shape))
dfa.head()
ax = dfa.plot('A_read_depth', 'A_strains', kind='scatter')
ax.set(ylabel="# strains", xlabel="read depth")
dfas = dfa[dfa['A_read_depth'] < 1000]
print("Shape: {}".format(dfas.shape))
ax = dfas.plot('A_read_depth', 'A_strains', kind='scatter')
ax.set(ylabel="# strains", xlabel="read depth")
# note: sm.OLS without add_constant fits a regression through the origin (no intercept)
model = sm.OLS(dfa['A_strains'], dfa['A_read_depth']).fit()
predictions = model.predict(dfa['A_read_depth'])
model.summary()
model = sm.OLS(dfas['A_strains'], dfas['A_read_depth']).fit()
predictions = model.predict(dfas['A_read_depth'])
model.summary()
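# A brief illustrative sketch (an added comparison, not part of the original
# analysis): sm.add_constant fits an intercept instead of forcing the
# regression through the origin.
X_const = sm.add_constant(dfas['A_read_depth'])
model_c = sm.OLS(dfas['A_strains'], X_const).fit()
model_c.summary()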
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading the last 30 records
Step2: I read the last 30 records from the file to show the evolution of the battery consumption in a chart. Note that I spent a few months without collecting data from the doorbell due to a technical problem with the gateway. Even after bringing the equipment back, I verified that the device's consumption is quite low.
Step3: entry_id
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import csv
from altair import Chart, X, Y, Axis, SortField
import matplotlib.pyplot as plt
pd.__version__
%matplotlib inline
total = pd.read_csv("../data/database2017.csv")
total.shape
total.tail()
totallast30 = total.sort_values(by='created_at',ascending=False)[:30]
totallast30.plot(kind="bar", x="entry_id", y="field1",
                title="Device energy consumption",
                legend=True)
Chart(totallast30).mark_bar().encode(x="created_at", y="field1")
Chart(totallast30).mark_bar().encode(y='created_at', x='field1')
Chart(totallast30).mark_bar().encode(
x=X('field1'),
y=Y('created_at'),
color='entry_id')
Chart(totallast30).mark_bar().encode(
    x=X('created_at:O',
        axis=Axis(title='Date')),
    y=Y('field1:Q',
        axis=Axis(title='Battery level')),
    color='entry_id')
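# An illustrative sketch (an added example, assuming a smoothed trend is
# useful): a rolling mean of the battery readings over 5 records.
trend = totallast30.sort_values(by='created_at')['field1'].rolling(5).mean()
trend.plot(title="Rolling mean of battery readings")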
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can also express this query in multiple ways using aggregations
Step2: Annotate
Step3: Compare this to what we had before
Step4: Note
Step5: Select and Transmute
Step6: We can also create a new field that stores the age relative to the average. Note that new fields must be assigned a name (in this case mean_shifted_age)
Step7: transmute replaces any fields mentioned on the right-hand side with the new fields, but leaves unmentioned fields unchanged. transmute is useful for transforming data into a new form. Compare the following two snippets of code. The second is identical to the first with transmute replacing select.
Step8: Global Fields
|
<ASSISTANT_TASK:>
Python Code:
import hail as hl
hl.utils.get_movie_lens('data/')
users = hl.read_table('data/users.ht')
users.filter(users.occupation == 'programmer').count()
users.aggregate(hl.agg.filter(users.occupation == 'programmer', hl.agg.count()))
users.aggregate(hl.agg.counter(users.occupation == 'programmer'))[True]
missing_occupations = hl.set(['other', 'none'])
t = users.annotate(
cleaned_occupation = hl.cond(missing_occupations.contains(users.occupation),
hl.null('str'),
users.occupation))
t.show()
users.show()
users.describe()
users.select(users.sex, users.occupation).show()
mean_age = round(users.aggregate(hl.agg.stats(users.age)).mean)
users.select(users.sex, users.occupation, mean_shifted_age = users.age - mean_age).show()
missing_occupations = hl.set(['other', 'none'])
t = users.select(
cleaned_occupation = hl.cond(missing_occupations.contains(users.occupation),
hl.null('str'),
users.occupation))
t.show()
missing_occupations = hl.set(['other', 'none'])
t = users.transmute(
cleaned_occupation = hl.cond(missing_occupations.contains(users.occupation),
hl.null('str'),
users.occupation))
t.show()
t = users.annotate_globals(cohort = 5, cloudable = hl.set(['sample1', 'sample10', 'sample15']))
t.describe()
t.cloudable
hl.eval(t.cloudable)
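# A small illustrative sketch (added example): hl.eval also works on
# expressions built from globals, e.g. a membership test on the set above.
hl.eval(t.cloudable.contains('sample1'))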
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First reload the data we generated in 1_notmnist.ipynb.
Step2: Reformat into a shape that's more adapted to the models we're going to train
Step3: We're first going to train a multinomial logistic regression using simple gradient descent.
Step4: Let's run this computation and iterate
Step5: Let's now switch to stochastic gradient descent training instead, which is much faster.
Step6: Let's run it
Step7: Problem
|
<ASSISTANT_TASK:>
Python Code:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# With gradient descent training, even this much data is prohibitive.
# Subset the training data for faster turnaround.
train_subset = 10000
graph = tf.Graph()
with graph.as_default():
# Input data.
# Load the training, validation and test data into constants that are
# attached to the graph.
tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
tf_train_labels = tf.constant(train_labels[:train_subset])
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
# These are the parameters that we are going to be training. The weight
# matrix will be initialized using random valued following a (truncated)
# normal distribution. The biases get initialized to zero.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
# We multiply the inputs with the weight matrix, and add biases. We compute
# the softmax and cross-entropy (it's one operation in TensorFlow, because
# it's very common, and it can be optimized). We take the average of this
# cross-entropy across all training examples: that's our loss.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
# We are going to find the minimum of this loss using gradient descent.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
# These are not part of training, but merely here so that we can report
# accuracy figures as we train.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 801
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
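# A tiny illustrative check of accuracy() on toy arrays (not part of the
# original assignment): two predictions, one of which is correct.
toy_preds = np.array([[0.9, 0.1], [0.4, 0.6]])
toy_labels = np.array([[1.0, 0.0], [1.0, 0.0]])
print(accuracy(toy_preds, toy_labels))  # prints 50.0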
with tf.Session(graph=graph) as session:
# This is a one-time operation which ensures the parameters get initialized as
# we described in the graph: random weights for the matrix, zeros for the
# biases.
tf.initialize_all_variables().run()
print('Initialized')
for step in range(num_steps):
# Run the computations. We tell .run() that we want to run the optimizer,
# and get the loss value and the training predictions returned as numpy
# arrays.
_, l, predictions = session.run([optimizer, loss, train_prediction])
if (step % 100 == 0):
print('Loss at step %d: %f' % (step, l))
print('Training accuracy: %.1f%%' % accuracy(
predictions, train_labels[:train_subset, :]))
# Calling .eval() on valid_prediction is basically like calling run(), but
# just to get that one numpy array. Note that it recomputes all its graph
# dependencies.
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
batch_size = 128
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
##########################################################################
## Turning logistic regression to 1-hidden layer neural net with nn.relu
batch_size = 128
hidden_nodes = 1024
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Layer 1
l1_weights = tf.Variable(tf.truncated_normal([image_size * image_size, hidden_nodes]))
l1_biases = tf.Variable(tf.zeros([hidden_nodes]))
l1_logits = tf.matmul(tf_train_dataset, l1_weights) + l1_biases
l1_output = tf.nn.relu(l1_logits)
# Layer 2
l2_weights = tf.Variable(tf.truncated_normal([hidden_nodes, num_labels]))
l2_biases = tf.Variable(tf.truncated_normal([num_labels]))
l2_logits = tf.matmul(l1_output, l2_weights) + l2_biases
# Loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(l2_logits, tf_train_labels))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(l2_logits)
l1_logits = tf.matmul(tf_valid_dataset, l1_weights) + l1_biases
l1_output = tf.nn.relu(l1_logits)
l2_logits = tf.matmul(l1_output, l2_weights) + l2_biases
valid_prediction = tf.nn.softmax(l2_logits)
l1_logits = tf.matmul(tf_test_dataset, l1_weights) + l1_biases
l1_output = tf.nn.relu(l1_logits)
l2_logits = tf.matmul(l1_output, l2_weights) + l2_biases
test_prediction = tf.nn.softmax(l2_logits)
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
print(train_labels.shape[0])
print(batch_data.shape)
print(batch_labels.shape)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 4.1.1 Visualizing the section force orbits
Step2: 4.1.2 Numerical solution of the equilibrium equations
Step3: The result shows that this numerical method does not preserve the Hamiltonian. Therefore, we now try the implicit midpoint rule, $X_{n+1} = X_n + h\,f\big(\tfrac{1}{2}(X_n + X_{n+1})\big)$, which according to Hairer et al. (2006), p. 247 preserves quadratic invariants
Step4: This method yields an excellent result. However, if we are considering a form-finding problem for a pinned rod, we cannot guarantee that the endpoint of our solution falls on the endpoint of the rod. Therefore we will try another experiment using the recurrent step of the implicit midpoint rule.
Step5: We test whether the interval values computed from each formula are consistent
Step6: The experiment succeeds; if we examine the result for $h$, we see that every formula produces exactly the same results in each step. We have a straightforward and simple method to compute section forces and rod length for a prescribed traction and Hamiltonian.
Step7: 4.2 Spatial case
Step8: Let's pack the code into a single tool and do a new experiment
Step9: We run a new experiment, this time with $F<\mathcal{H}$, which means that there are moments at the end sections
Step10: It is easy to observe that the solution is "rotated" compared to the expected one. The reason for this behavior is that no rotation has been considered at the start node, but there is rotation caused by the non-zero curvature at the start section. Let's modify the code to fix this issue. For this purpose we will add to the initial (reference) angle half of the angle corresponding to the curvature at the start section, and we will take $h_0$ as the averaging length for the curvature
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
EI = 5000. #kN m^2
H = 3600. #kN m/m
F1 = -2600. #kN
F2 = -3600. #kN
F3 = -4600. #kN
phi = np.linspace(np.pi, -np.pi, 501)
theta0 = np.arccos(H/F3)
phi3 = np.linspace(theta0, - theta0, 501)
N1 = F1*np.cos(phi)
Q1 = -F1*np.sin(phi)
M1 = -np.sqrt(2*EI*(H - N1)) # sign is consistent with the choice of sense for theta
N2 = F2*np.cos(phi)
Q2 = -F2*np.sin(phi)
M2 = -np.sqrt(2*EI*(H - N2))
N3 = F3*np.cos(phi3)
Q3 = -F3*np.sin(phi3)
M3 = -np.sqrt(2*EI*(H - N3))
fig = plt.figure(figsize=(9., 9.))
ax = fig.gca(projection='3d')
ax.plot(N1, Q1, M1, color='r')
ax.plot(N1, Q1, -M1, color='r')
ax.plot(N2, Q2, M2, color='g')
ax.plot(N2, Q2, -M2, color='g')
ax.plot(N3, Q3, M3, color='b')
ax.plot(N3, Q3, -M3, color='b')
ax.set_xlabel('$N$')
ax.set_ylabel('$V$')
ax.set_zlabel('$M$')
print('F= {0:.0f} kN, H = {1:.0f} kN m/m, theta_0 = {2:.5f} rad'.format(F3, H, theta0))
sectionForces = []
N0 = F3*np.cos(theta0)
Q0 = -F3*np.sin(theta0)
M0 = 0.
sectionForces.append([N0, Q0, M0])
nEdges = 50
h = 0.1
Nn = N0
Qn = Q0
Mn = M0
for n1 in range(1, nEdges+1):
Nn1 = Nn*np.cos(h*(Mn - h*Qn/4)/EI) + Qn*np.sin(h*(Mn - h*Qn/4)/EI)
Qn1 = Qn*np.cos(h*(Mn - h*Qn/4)/EI) - Nn*np.sin(h*(Mn - h*Qn/4)/EI)
Mn1 = Mn - 0.5*h*(Qn + Qn1)
sectionForces.append([Nn1, Qn1, Mn1])
Nn = Nn1
Qn = Qn1
Mn = Mn1
axialForce = np.array([force[0] for force in sectionForces])
shearForce = np.array([force[1] for force in sectionForces])
bendingMoment = np.array([force[2] for force in sectionForces])
H = bendingMoment**2/2/EI + axialForce
fig = plt.figure(figsize=(9., 9.))
ax = fig.gca(projection='3d')
ax.plot(N3, Q3, M3, color='b')
ax.plot(N3, Q3, -M3, color='b')
ax.scatter(axialForce, shearForce, bendingMoment, color='r')
ax.set_xlabel('$N$')
ax.set_ylabel('$Q$')
ax.set_zlabel('$M$')
fig = plt.figure(figsize=(7., 7.))
ax = fig.gca()
ax.plot(np.linspace(0, h*nEdges, nEdges+1), H)
ax.set_xlabel('$s$')
ax.set_ylabel('$\mathcal{H}$')
import scipy.optimize
def implicitMidpoint(Xn1, Xn, h, EI):
f = np.empty(3)
f[0] = Xn1[0] - Xn[0] - h*(Xn[1] + Xn1[1])*(Xn[2] + Xn1[2])/4./EI
f[1] = Xn1[1] - Xn[1] + h*(Xn[0] + Xn1[0])*(Xn[2] + Xn1[2])/4./EI
f[2] = Xn1[2] - Xn[2] + h*(Xn[1] + Xn1[1])/2.
return f
sectionForces = []
N0 = F3*np.cos(theta0)
Q0 = -F3*np.sin(theta0)
M0 = 0.
sectionForces.append([N0, Q0, M0])
nEdges = 50
h = 0.1
Xn = np.array([N0, Q0, M0])
for n1 in range(1, nEdges+1):
eqSystem = lambda Xn1: implicitMidpoint(Xn1, Xn, h, EI)
solution = scipy.optimize.root(eqSystem, Xn)
Xn1 = solution.x
sectionForces.append(Xn1)
Xn = Xn1
axialForce = np.array([force[0] for force in sectionForces])
shearForce = np.array([force[1] for force in sectionForces])
bendingMoment = np.array([force[2] for force in sectionForces])
H = bendingMoment**2/2/EI + axialForce
fig = plt.figure(figsize=(9., 9.))
ax = fig.gca(projection='3d')
ax.plot(N3, Q3, M3, color='b')
ax.plot(N3, Q3, -M3, color='b')
ax.scatter(axialForce, shearForce, bendingMoment, color='r')
ax.set_xlabel('$N$')
ax.set_ylabel('$Q$')
ax.set_zlabel('$M$')
fig = plt.figure(figsize=(7., 7.))
ax = fig.gca()
ax.plot(np.linspace(0, h*nEdges, nEdges+1), H)
ax.set_xlabel('$s$')
ax.set_ylabel('$\mathcal{H}$')
EI = 5000. #kN m^2
H = 3600. #kN m/m
F = -4600. #kN
theta0 = np.arccos(H/F)
theta = np.linspace(theta0, -theta0, nEdges+1)
N = F*np.cos(theta)
Q = -F*np.sin(theta)
M = -np.sqrt(2*EI*(H - N))
h = []
for n in range(nEdges):
hNn = 4*EI*(N[n+1] - N[n])/(Q[n] + Q[n+1])/(M[n] + M[n+1])
hQn = -4*EI*(Q[n+1] - Q[n])/(N[n] + N[n+1])/(M[n] + M[n+1])
hMn = -2*(M[n+1] - M[n])/(Q[n] + Q[n+1])
h.append([hNn, hQn, hMn])
# h
h = []
for n in range(nEdges):
hn = -2*(M[n+1] - M[n])/(Q[n] + Q[n+1])
h.append(hn)
print('The length of the rod is {:.3f} m'.format(np.sum(h)))
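# An illustrative consistency check (added example): by construction the
# discrete Hamiltonian M^2/(2 EI) + N should equal H at every vertex.
H_check = M**2/2/EI + N
print('Hamiltonian preserved at every vertex: {}'.format(np.allclose(H_check, H)))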
kappa = M / EI
phi = np.zeros(nEdges)
rotor = np.zeros(nEdges) + 1j*np.zeros(nEdges)
for n in range(1, nEdges):
phi[n] = 2.*np.arctan(kappa[n]*(h[n-1] + h[n])/4.)
rotor[n] = (4./(h[n-1] + h[n]) + 1j * kappa[n])/(4./(h[n-1] + h[n]) - 1j * kappa[n])
np.sum(phi)/2
phi[0] = theta0
rotor[0] = np.exp(1j*phi[0])
gamma = np.zeros(len(kappa)) + 1j*np.zeros(len(kappa))
gamma[0] = 0.+0j
gamma[1] = gamma[0] + h[0]*rotor[0]
for n in range(1, len(kappa)-1):
gamma[n+1] = gamma[n] + h[n]/h[n-1] * (gamma[n] - gamma[n-1]) * rotor[n]
fig = plt.figure(figsize=(9,9))
ax = fig.gca(aspect='equal')
ax.plot(gamma.real, gamma.imag, color='b')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
print('Total rotation at the orbit: {:.5f}*pi rad'.format((theta[-1] - theta[0])/np.pi))
print('Total rotation computed with DDG: {:.5f}*pi rad'.format((np.sum(phi) - phi[0])/np.pi))
EI = 5000. #kN m^2
H = 3600. #kN m/m
F = -4600. #kN
nEdges = 200
nVertex = nEdges + 1
theta0 = np.arccos(H/F)
theta = np.linspace(theta0, -theta0, nVertex)
N = F*np.cos(theta)
Q = -F*np.sin(theta)
M = -np.sqrt(2*EI*(H - N))
h = np.zeros(nEdges)
h[:] = -2*(M[1:] - M[0:-1])/(Q[0:-1] + Q[1:]) # pythonic looping
print('The length of the rod is {:.3f} m'.format(np.sum(h)))
kappa = M / EI
phi = np.zeros(nVertex-1)
rotor = np.zeros(nVertex-1) + 1j*np.zeros(nVertex-1)
phi[0] = theta0
phi[1:] = 2.*np.arctan(kappa[1:-1]*(h[0:-1] + h[1:])/4.)
rotor[0] = np.exp(1j*phi[0])
rotor[1:] = (4./(h[0:-1] + h[1:]) + 1j * kappa[1:-1])/(4./(h[0:-1] + h[1:]) - 1j * kappa[1:-1])
gamma = np.zeros(nVertex) + 1j*np.zeros(nVertex)
gamma[0] = 0.+0j
gamma[1] = gamma[0] + h[0]*rotor[0]
for n in range(1, nVertex-1):
gamma[n+1] = gamma[n] + h[n]/h[n-1] * (gamma[n] - gamma[n-1]) * rotor[n]
fig = plt.figure(figsize=(9,9))
ax = fig.gca(aspect='equal')
ax.plot(gamma.real, gamma.imag, color='b')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
print('Total rotation at the orbit: {:.5f}*pi rad'.format((theta[-1] - theta[0])/np.pi))
print('Total rotation computed with DDG: {:.5f}*pi rad'.format((np.sum(phi) - phi[0])/np.pi))
EI = 5000. #kN m^2
H = 3600. #kN m/m
F = -3500. #kN
nEdges = 50
nVertex = nEdges + 1
if abs(F) >= H:  # arccos(H/F) is only defined when |H/F| <= 1
theta0 = np.arccos(H/F)
else:
theta0 = np.pi
theta = np.linspace(theta0, -theta0, nVertex)
N = F*np.cos(theta)
Q = -F*np.sin(theta)
M = -np.sqrt(2*EI*(H - N))
h = np.zeros(nEdges)
h[:] = -2*(M[1:] - M[0:-1])/(Q[0:-1] + Q[1:])
kappa = M / EI
phi = np.zeros(nVertex-1)
rotor = np.zeros(nVertex-1) + 1j*np.zeros(nVertex-1)
phi[0] = theta0
phi[1:] = 2.*np.arctan(kappa[1:-1]*(h[0:-1] + h[1:])/4.)
rotor[0] = np.exp(1j*phi[0])
rotor[1:] = (4./(h[0:-1] + h[1:]) + 1j * kappa[1:-1])/(4./(h[0:-1] + h[1:]) - 1j * kappa[1:-1])
gamma = np.zeros(nVertex) + 1j*np.zeros(nVertex)
gamma[0] = 0.+0j
gamma[1] = gamma[0] + h[0]*rotor[0]
for n in range(1, nVertex-1):
gamma[n+1] = gamma[n] + h[n]/h[n-1] * (gamma[n] - gamma[n-1]) * rotor[n]
fig = plt.figure(figsize=(9,9))
ax = fig.gca(aspect='equal')
ax.plot(gamma.real, gamma.imag, color='b')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
print('Total rotation at the orbit: {:.5f}*pi rad'.format((theta[-1] - theta[0])/np.pi))
print('Total rotation computed with DDG: {:.5f}*pi rad'.format((np.sum(phi) - phi[0])/np.pi))
EI = 5000. #kN m^2
H = 3600. #kN m/m
F = -3500. #kN
nEdges = 50
nVertex = nEdges + 1
if abs(F) >= H:  # arccos(H/F) is only defined when |H/F| <= 1
theta0 = np.arccos(H/F)
else:
theta0 = np.pi
theta = np.linspace(theta0, -theta0, nVertex)
N = F*np.cos(theta)
Q = -F*np.sin(theta)
M = -np.sqrt(2*EI*(H - N))
h = np.zeros(nEdges)
h[:] = -2*(M[1:] - M[0:-1])/(Q[0:-1] + Q[1:])
kappa = M / EI
phi = np.zeros(nVertex-1)
rotor = np.zeros(nVertex-1) + 1j*np.zeros(nVertex-1)
phi[0] = theta0 + np.arctan(h[0]*kappa[0]/2) # modified code line
phi[1:] = 2.*np.arctan(kappa[1:-1]*(h[0:-1] + h[1:])/4.)
rotor[0] = np.exp(1j*phi[0])
rotor[1:] = (4./(h[0:-1] + h[1:]) + 1j * kappa[1:-1])/(4./(h[0:-1] + h[1:]) - 1j * kappa[1:-1])
gamma = np.zeros(nVertex) + 1j*np.zeros(nVertex)
gamma[0] = 0.+0j
gamma[1] = gamma[0] + h[0]*rotor[0]
for n in range(1, nVertex-1):
gamma[n+1] = gamma[n] + h[n]/h[n-1] * (gamma[n] - gamma[n-1]) * rotor[n]
fig = plt.figure(figsize=(9,9))
ax = fig.gca(aspect='equal')
ax.plot(gamma.real, gamma.imag, color='b')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
print('The y coordinate at the end section is now y = {:.7f} m'.format(gamma[-1].imag))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Count the number of tweets per day for every news item and calculate the cumulative diffusion
Step2: Plot diffusion for every day for all news together
Step3: Plot cumulative diffusion of all news together
Step4: Plot cumulative diffusion for every news headline
Step5: Average diffusion per day for all news
Step6: The same graph but in logarithmic scale
Step7: Calculate and plot standard deviation
Step8: Calculate the share of values inside one standard deviation for every day
Step9: Store the average diffusion data on disk for use by another jupyter notebook
Step10: Plot average diffusion for both real and fake news on one graph
Step11: In logarithmic scale
Step12: Calculate average diffusion duration (number of days until diffusion is dead)
|
<ASSISTANT_TASK:>
Python Code:
client = pymongo.MongoClient("46.101.236.181")
db = client.allfake
# get collection names
collections = sorted([collection for collection in db.collection_names()])
day = {} # number of tweets per day per collection
diff = {} # cumullative diffusion on day per colletion
for collection in collections:
# timeframe
relevant_from = db[collection].find().sort("timestamp", pymongo.ASCENDING).limit(1)[0]['timestamp']
relevant_till = db[collection].find().sort("timestamp", pymongo.DESCENDING).limit(1)[0]['timestamp']
i = 0
day[collection] = [] # number of tweets for every collection for every day
diff[collection] = [] # cummulative diffusion for every collection for every day
d = relevant_from
delta = datetime.timedelta(days=1)
while d <= relevant_till:
# tweets per day per collection
day[collection].append(db[collection].find({"timestamp":{"$gte": d, "$lt": d + delta}}).count())
# cummulative diffusion per day per collection
if i == 0:
diff[collection].append( day[collection][i] )
else:
diff[collection].append( diff[collection][i-1] + day[collection][i] )
d += delta
i += 1
# the longest duration of diffusion among all news headlines
max_days = max(len(day[c]) for c in day)
summ_of_diffusions = [0] * max_days # summary diffusion for every day
# calculate summary diffusion for every day
for d in range(max_days):
for c in collections:
# if there is an entry for this day for this collection, add its number of tweets to the number of this day
if d < len(day[c]):
summ_of_diffusions[d] += day[c][d]
plt.step(range(len(summ_of_diffusions)),summ_of_diffusions, 'g')
plt.xlabel('Day')
plt.ylabel('Number of tweets')
plt.title('Diffusion of all fake news together')
plt.show()
summ_of_diffusions_cumulative = [0] * max_days
summ_of_diffusions_cumulative[0] = summ_of_diffusions[0]
for d in range(1, max_days):
summ_of_diffusions_cumulative[d] += summ_of_diffusions_cumulative[d-1] + summ_of_diffusions[d]
plt.step(range(len(summ_of_diffusions_cumulative)),summ_of_diffusions_cumulative, 'g')
plt.xlabel('Day')
plt.ylabel('Cummulative number of tweets')
plt.title('Cummulative diffusion of all fake news together')
plt.show()
for collection in collections:
plt.step([d+1 for d in range(len(diff[collection]))], diff[collection])
plt.xlabel('Day')
plt.ylabel('Cumulative tweets number')
plt.title('Cumulative diffusion for fake news headlines')
plt.show()
averagediff = [0 for _ in range(max_days)] # average diffusion per day
for collection in collections:
for i,d in enumerate(day[collection]):
averagediff[i] += d / len(collections)
plt.xlabel('Day')
plt.ylabel('Average number of tweets')
plt.step(range(1,len(averagediff)+1),averagediff, 'r')
plt.title('Average diffusion of fake news')
plt.show()
plt.yscale('log')
plt.xlabel('Day')
plt.ylabel('Average number of tweets')
plt.step(range(1,len(averagediff)+1),averagediff, 'r')
plt.show()
# export some data to another notebook
averagediff_fake = averagediff
%store averagediff_fake
avgdiff_std = [0 for _ in range(max_days)] # standard deviation for every day for all collections
number_tweets = [[] for _ in range(max_days)] # number of tweets for every day for every collection
for d in range(max_days):
for c in collections:
# if there is an entry for this day for this collection
if d < len(day[c]):
# add number of tweets for this day for this colletion to the number_tweets for this day
number_tweets[d].append(day[c][d])
# calculate standard deviation for this day
avgdiff_std[d] = np.std(number_tweets[d])
plt.ylabel('Standard deviation of the average number of tweets per day')
plt.xlabel('Day')
plt.step(range(1,len(avgdiff_std)+1),avgdiff_std, 'r')
plt.title('Standard deviation for fake news average')
plt.show()
inside_std = [0 for _ in range(max_days)] # number of values inside one standard deviation for every day
inside_std_share = [0 for _ in range(max_days)] # share of values inside one standard deviation for every day
for d in range(max_days):
for c in collections:
# set borders of mean plusminus one std
lowest = averagediff[d] - avgdiff_std[d]
highest = averagediff[d] + avgdiff_std[d]
# if there is entray for this day for this collection and its value is inside the borderes
if d < len(day[c]) and (day[c][d] >= lowest and day[c][d] <= highest):
# increment number of values inside one std for this day
inside_std[d] += 1
# calculate the share of values inside one std for this day
inside_std_share[d] = inside_std[d] / float(len(number_tweets[d]))
plt.ylabel('Percent of values in 1 std from average')
plt.xlabel('Day')
plt.scatter(range(1,len(inside_std_share)+1),inside_std_share, c='r')
plt.title('Percentage of values inside the range\n of one standard deviation from mean for fake news')
plt.show()
%store -r averagediff_real
plt.xlabel('Day')
plt.ylabel('Average number of tweets')
plt.step(range(1,len(averagediff)+1),averagediff, 'r', label="fake news")
plt.step(range(1,len(averagediff_real)+1),averagediff_real, 'g', label="real news")
plt.legend()
plt.title('Average diffusion for both types of news')
plt.show()
plt.ylabel('Average number of tweets')
plt.xlabel('Day')
plt.yscale('log')
plt.step(range(1,len(averagediff)+1),averagediff, 'r', label="fake news")
plt.step(range(1,len(averagediff_real)+1),averagediff_real, 'g', label="real news")
plt.legend()
plt.title('Average diffusion for both types of news in logarithmic scale')
plt.show()
diffDurationAvg = 0; # average duration of diffusion
durations = [len(day[col]) for col in collections] # all durations
diffDurationAvg = np.mean(durations) # mean duration
diffDurationAvg_std = np.std(durations) # standard deviation for the mean
print "Average diffusion duration: %.2f days" % diffDurationAvg
print "Standard deviation: %.2f days" % diffDurationAvg_std
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Even Fibonacci numbers
Step2: Largest prime factor
Step3: Largest palindrome product
Step4: Smallest multiple
Step5: Sum square difference
Step6: 10001st prime
Step7: Largest product in a series
Step8: Special Pythagorean triplet
Step9: Summation of primes
|
<ASSISTANT_TASK:>
Python Code:
from euler import Seq, timer
def p001():
return (
range(1000)
>> Seq.filter(lambda n: (n%3==0) | (n%5==0))
>> Seq.sum)
timer(p001)
from euler import Seq, timer
def p002():
return (
Seq.unfold(lambda (a,b): (b, (b, b+a)), (0,1))
>> Seq.filter(lambda n: n%2==0)
>> Seq.takeWhile(lambda n: n<4000000)
>> Seq.sum)
timer(p002)
from euler import Seq, FactorInteger, fst, timer
def p003():
return FactorInteger(600851475143) >> Seq.map(fst) >> Seq.max
timer(p003)
from euler import Seq, timer
def p004():
return (
range(100, 1000)
>> Seq.collect(lambda a: range(a, 1000)
>> Seq.filter(lambda b: str(a*b)[::-1] == str(a*b))
>> Seq.map(lambda b: a*b))
>> Seq.max)
timer(p004)
from euler import Seq, timer
def p004():
return (
[a*b for a in range(100, 1000)
for b in range(a, 1000)
if str(a*b)[::-1] == str(a*b)]
>> Seq.max)
timer(p004)
from euler import Seq, LCM, timer
def p005():
return (range(1,21) >> Seq.reduce(LCM))
timer(p005)
from euler import timer, Seq
def p006():
return (range(101) >> Seq.sum) ** 2 - (range(101) >> Seq.sumBy(lambda i: i**2))
timer(p006)
from euler import timer
def p006():
return sum(range(101)) ** 2 - sum(i ** 2 for i in range(101))
timer(p006)
from euler import prime, timer
def p007():
return prime(10000)
timer(p007)
from euler import Seq, timer
def p008():
return (
"".join(open('data/p008.txt').read().splitlines())
>> Seq.window(5)
>> Seq.map(lambda s: s >> Seq.map(int) >> Seq.product)
>> Seq.max)
timer(p008)
from euler import timer, Seq
def p009():
return(
range(1,999)
>> Seq.collect(lambda a: range(a, 1000-a)
>> Seq.filter(lambda b: (a**2 + b**2 == (1000-a-b)**2))
>> Seq.map(lambda b: a*b*(1000-a-b)))
>> Seq.head)
timer(p009)
def p009():
return (
[a*b*(1000-a-b)
for a in range(1,999)
for b in range(a, 1000-a)
if (a ** 2 + b ** 2 == (1000-a-b) ** 2)][0])
timer(p009)
from euler import Seq, primes, timer
def p010():
return (
primes()
>> Seq.takeWhile(lambda n: n<2000000)
>> Seq.sum)
timer(p010)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Des données synthétiques
Step2: Exercice 1
Step3: Exercice 2
Step4: Exercice 3
Step5: Le coefficient $R^2$ est plus élevé car on utilise les mêmes variables que le modèle. Il n'est théoriquement pas possible d'aller au delà.
Step6: Le modèle linéaire est le meilleur modèle dans notre cas puisque les données ont été construites de la sorte. Il est attendu que le $R^2$ ne soit pas plus élevé tout du moins pas significativement plus élevé. On regarde avec les features polynômiales...
Step7: Avant de tirer des conclusions hâtives, il faudrait recommencer plusieurs fois l'expérience avant de dire que la performance est plus ou moins élevée avec ces features ce que ce notebook ne fera pas puisque la réponse théorique est connue dans ce cas.
Step8: Le seul modèle qui s'en tire vraiment est la régression linéaire avec les features polynômiales. Comme il équivaut au modèle théorique, il est normal qu'il ne se plante pas trop même si ses coefficients ne sont pas identique au modèle théorique (il faudrait plus de données pour que cela converge).
Step9: Pour les autes modèles, voyons déjà visuellement ce qu'il se passe.
Step10: Le graphe étudie les variables des modèles selon une coordonnées tout en restreignant l'autre dans un intervalle donné. On voit tout de suite que la forêt aléatoire devient constante au delà d'un certain seuil. C'est encore une fois tout à fait normal puisque la base d'apprentissage ne contient des $X_1, X_2$ que dans l'intervalle $[0, 1]$. En dehors, chaque arbre de décision produit une valeur constante tout simplement parce que ce sont des fonctions en escalier
Step11: Exercice 8
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
%matplotlib inline
from numpy import random
n = 1000
X = random.rand(n, 2)
X[:5]
y = X[:, 0] * 3 - 2 * X[:, 1] ** 2 + random.rand(n)
y[:5]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(X_train, y_train)
from sklearn.metrics import r2_score
score = r2_score(y_test, reg.predict(X_test))
score
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures()
poly.fit(X_train)
X_train2 = poly.transform(X_train)
reg2 = LinearRegression()
reg2.fit(X_train2, y_train)
score2 = r2_score(y_test, reg2.predict(poly.transform(X_test)))
score2
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor()
rf.fit(X_train, y_train)
r2_score(y_test, rf.predict(X_test))
rf2 = RandomForestRegressor()
rf2.fit(X_train2, y_train)
r2_score(y_test, rf2.predict(poly.transform(X_test)))
X_test2 = random.rand(n, 2) + 0.5
y_test2 = X_test2[:, 0] * 3 - 2 * X_test2[:, 1] ** 2 + random.rand(n)
res = []
for model in [reg, reg2, rf, rf2]:
name = model.__class__.__name__
try:
pred = model.predict(X_test)
pred2 = model.predict(X_test2)
except Exception:
pred = model.predict(poly.transform(X_test))
pred2 = model.predict(poly.transform(X_test2))
name += " + X^2"
res.append(dict(name=name, r2=r2_score(y_test, pred),
r2_jeu2=r2_score(y_test2, pred2)))
import pandas
df = pandas.DataFrame(res)
df
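# An illustrative sketch of the repetition suggested above (an added example,
# not part of the original exercise): estimate the variability of R2 with
# 5-fold cross-validation instead of a single split.
from sklearn.model_selection import cross_val_score
scores = cross_val_score(LinearRegression(), poly.transform(X), y, cv=5, scoring='r2')
print(scores.mean(), scores.std())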
reg2.coef_, reg2.intercept_
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 2, figsize=(14, 4))
a, b = 0.9, 1.1
index1 = (X_test2[:, 0] >= a) & (X_test2[:, 0] <= b)
index2 = (X_test2[:, 1] >= a) & (X_test2[:, 1] <= b)
yth = X_test2[:, 0] * 3 - 2 * X_test2[:, 1] ** 2  # theoretical model (without the noise term)
ax[0].set_xlabel("X1")
ax[0].set_ylabel("Y")
ax[0].plot(X_test2[index2, 0], yth[index2], '.', label='theoretical Y')
ax[1].set_xlabel("X2")
ax[1].set_ylabel("Y")
ax[1].plot(X_test2[index1, 1], yth[index1], '.', label='theoretical Y')
for model in [reg, reg2, rf, rf2]:
name = model.__class__.__name__
try:
pred2 = model.predict(X_test2)
except Exception:
pred2 = model.predict(poly.transform(X_test2))
name += " + X^2"
ax[0].plot(X_test2[index2, 0], pred2[index2], '.', label=name)
ax[1].plot(X_test2[index1, 1], pred2[index1], '.', label=name)
ax[0].legend()
ax[1].legend();
from sklearn.tree import DecisionTreeRegressor
res = []
for md in range(1, 20):
tree = DecisionTreeRegressor(max_depth=md)
tree.fit(X_train, y_train)
r2_train = r2_score(y_train, tree.predict(X_train))
r2_test = r2_score(y_test, tree.predict(X_test))
res.append(dict(profondeur=md, r2_train=r2_train, r2_test=r2_test))
df = pandas.DataFrame(res)
df.head()
ax = df.plot(x='profondeur', y=['r2_train', 'r2_test'])
ax.set_title("Evolution du R2 selon la profondeur");
from sklearn.linear_model import Ridge, Lasso
import numpy.linalg as nplin
import numpy
def coef_non_nuls(coef):
return sum(numpy.abs(coef) > 0.001)
res = []
for d in range(1, 21):
poly = PolynomialFeatures(degree=d)
poly.fit(X_train)
X_test2 = poly.transform(X_test)
reg = LinearRegression()
reg.fit(poly.transform(X_train), y_train)
r2_reg = r2_score(y_test, reg.predict(X_test2))
rid = Ridge(alpha=10)
rid.fit(poly.transform(X_train), y_train)
r2_rid = r2_score(y_test, rid.predict(X_test2))
las = Lasso(alpha=0.01)
las.fit(poly.transform(X_train), y_train)
r2_las = r2_score(y_test, las.predict(X_test2))
res.append(dict(degre=d, nb_features=X_test2.shape[1],
r2_reg=r2_reg, r2_las=r2_las, r2_rid=r2_rid,
norm_reg=nplin.norm(reg.coef_),
norm_rid=nplin.norm(rid.coef_),
norm_las=nplin.norm(las.coef_),
nnul_reg=coef_non_nuls(reg.coef_),
nnul_rid=coef_non_nuls(rid.coef_),
nnul_las=coef_non_nuls(las.coef_),
))
df = pandas.DataFrame(res)
df
fig, ax = plt.subplots(1, 2, figsize=(12, 4))
df.plot(x="nb_features", y=["r2_reg", "r2_las", "r2_rid"], ax=ax[0])
ax[0].set_xlabel("Nombre de features")
ax[0].set_ylim([0, 1])
ax[0].set_title("r2")
df.plot(x="nb_features", y=["nnul_reg", "nnul_las", "nnul_rid"], ax=ax[1])
ax[1].set_xlabel("Nombre de features")
ax[1].set_title("Nombre de coefficients non nuls");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Session 5
Step2: <style> .rendered_html code {
Step3: Let's take a look at the first part of this
Step4: We'll just clean up the text a little. This isn't necessary, but can help the training along a little. In the example text I provided, there is a lot of white space (those \t's are tabs). I'll remove them. There are also repetitions of \n, new lines, which are not necessary. The code below will remove the tabs, ending whitespace, and any repeating newlines. Replace this with any preprocessing that makes sense for your dataset. Try to boil it down to just the possible letters for what you want to learn/synthesize while retaining any meaningful patterns
Step5: Now we can see how much text we have
Step6: In general, we'll want as much text as possible. But I'm including this just as a minimal example so you can explore your own. Try making a text file and seeing the size of it. You'll want about 1 MB at least.
Step7: And then create a mapping which can take us from the letter to an integer look up table of that letter (and vice-versa). To do this, we'll use an OrderedDict from the collections library. In Python 3.6, this is the default behavior of dict, but in earlier versions of Python, we'll need to be explicit by using OrderedDict.
Step8: We'll store a few variables that will determine the size of our network. First, batch_size determines how many sequences at a time we'll train on. The seqence_length parameter defines the maximum length to unroll our recurrent network for. This is effectively the depth of our network during training to help guide gradients along. Within each layer, we'll have n_cell LSTM units, and n_layers layers worth of LSTM units. Finally, we'll store the total number of possible characters in our data, which will determine the size of our one hot encoding (like we had for MNIST in Session 3).
Step9: Let's now create the input and output to our network. We'll use placeholders and feed these in later. The size of these need to be [batch_size, sequence_length]. We'll then see how to build the network in between.
Step10: The first thing we need to do is convert each of our sequence_length vectors in our batch to n_cells LSTM cells. We use a lookup table to find the value in X and use this as the input to n_cells LSTM cells. Our lookup table has n_chars possible elements and connects each character to n_cells cells. We create our lookup table using tf.get_variable and then the function tf.nn.embedding_lookup to connect our X placeholder to n_cells number of neurons.
Step11: Now recall from the lecture that recurrent neural networks share their weights across timesteps. So we don't want to have one large matrix with every timestep, but instead separate them. We'll use tf.split to split our [batch_size, sequence_length, n_cells] array in Xs into a list of sequence_length elements each composed of [batch_size, n_cells] arrays. This gives us sequence_length number of arrays of [batch_size, 1, n_cells]. We then use tf.squeeze to remove the 1st index corresponding to the singleton sequence_length index, resulting in simply [batch_size, n_cells].
Step12: With each of our timesteps split up, we can now connect them to a set of LSTM recurrent cells. We tell the tf.contrib.rnn.BasicLSTMCell method how many cells we want, i.e. how many neurons there are, and we also specify that our state will be stored as a tuple. This state defines the internal state of the cells as well as the connection from the previous timestep. We can also pass a value for the forget_bias. Be sure to experiment with this parameter as it can significantly effect performance (e.g. Gers, Felix A, Schmidhuber, Jurgen, and Cummins, Fred. Learning to forget
Step13: Let's take a look at the cell's state size
Step14: c defines the internal memory and h the output. We'll have as part of our cells, both an initial_state and a final_state. These will become important during inference and we'll see how these work more then. For now, we'll set the initial_state to all zeros using the convenience function provided inside our cells object, zero_state
Step15: Looking at what this does, we can see that it creates a tf.Tensor of zeros for our c and h states for each of our n_cells and stores this as a tuple inside the LSTMStateTuple object
Step16: So far, we have created a single layer of LSTM cells composed of n_cells number of cells. If we want another layer, we can use the tf.contrib.rnn.MultiRNNCell method, giving it our current cells, and a bit of pythonery to multiply our cells by the number of layers we want. We'll then update our initial_state variable to include the additional cells
Step17: Now if we take a look at our initial_state, we should see one LSTMStateTuple for each of our layers
Step18: So far, we haven't connected our recurrent cells to anything. Let's do this now using the tf.contrib.rnn.static_rnn method. We also pass it our initial_state variables. It gives us the outputs of the rnn, as well as their states after having been computed. Contrast that with the initial_state, which set the LSTM cells to zeros. After having computed something, the cells will all have a different value somehow reflecting the temporal dynamics and expectations of the next input. These will be stored in the state tensors for each of our LSTM layers inside a LSTMStateTuple just like the initial_state variable.
Step19: Let's take a look at the state now
Step20: Our outputs are returned as a list for each of our timesteps
Step21: We'll now stack all our outputs for every timestep. We can treat every observation at each timestep and for each batch using the same weight matrices going forward, since these should all have shared weights. Each timstep for each batch is its own observation. So we'll stack these in a 2d matrix so that we can create our softmax layer
Step22: Our outputs are now concatenated so that we have [batch_size * timesteps, n_cells]
Step23: We now create a softmax layer just like we did in Session 3 and in Session 3's homework. We multiply our final LSTM layer's n_cells outputs by a weight matrix to give us n_chars outputs. We then scale this output using a tf.nn.softmax layer so that they become a probability by exponentially scaling its value and dividing by its sum. We store the softmax probabilities in probs as well as keep track of the maximum index in Y_pred
Step24: To train the network, we'll measure the loss between our predicted outputs and true outputs. We could use the probs variable, but we can also make use of tf.nn.softmax_cross_entropy_with_logits which will compute the softmax for us. We therefore need to pass in the variable just before the softmax layer, denoted as logits (unscaled values). This takes our variable logits, the unscaled predicted outputs, as well as our true outputs, Y. Before we give it Y, we'll need to reshape our true outputs in the same way, [batch_size x timesteps, n_chars]. Luckily, tensorflow provides a convenience for doing this, the tf.nn.sparse_softmax_cross_entropy_with_logits function
Step25: Finally, we can create an optimizer in much the same way as we've done with every other network. Except, we will also "clip" the gradients of every trainable parameter. This is a hacky way to ensure that the gradients do not grow too large (the literature calls this the "exploding gradient problem"). However, note that the LSTM is built to help ensure this does not happen by allowing the gradient to be "gated". To learn more about this, please consider reading the following material
Step26: Let's take a look at the graph
Step27: Below is the rest of code we'll need to train the network. I do not recommend running this inside Jupyter Notebook for the entire length of the training because the network can take 1-2 days at least to train, and your browser may very likely complain. Instead, you should write a python script containing the necessary bits of code and run it using the Terminal. We didn't go over how to do this, so I'll leave it for you as an exercise. The next part of this notebook will have you load a pre-trained network.
Step28: <a name="part-5---pretrained-char-rnn-of-donald-trump"></a>
Step29: <a name="getting-the-trump-data"></a>
Step30: Let's take a look at what's going on in here
Step31: <a name="basic-text-analysis"></a>
Step32: Now let's count their occurrences
Step33: We can sort this like so
Step34: As we should expect, "the" is the most common word, as it is in the English language
Step35: Let's get the checkpoint and build the model then restore the variables from the checkpoint. The only parameters of consequence are n_layers and n_cells which define the total size and layout of the model. The rest are flexible. We'll set the batch_size and sequence_length to 1, meaning we can feed in a single character at a time only, and get back 1 character denoting the very next character's prediction.
Step36: Let's now take a look at the model
Step37: <a name="inference-keeping-track-of-the-state"></a>
Step38: <a name="probabilistic-sampling"></a>
Step39: <a name="inference-temperature"></a>
Step40: <a name="inference-priming"></a>
Step41: <a name="assignment-submission"></a>
|
<ASSISTANT_TASK:>
Python Code:
# First check the Python version
import sys
if sys.version_info < (3,4):
print('You are running an older version of Python!\n\n',
'You should consider updating to Python 3.4.0 or',
'higher as the libraries built for this course',
'have only been tested in Python 3.4 and higher.\n')
    print('Try installing the Python 3.5 version of anaconda '
'and then restart `jupyter notebook`:\n',
'https://www.continuum.io/downloads\n\n')
# Now get necessary libraries
try:
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import data
from scipy.misc import imresize
from scipy.ndimage.filters import gaussian_filter
import IPython.display as ipyd
import tensorflow as tf
from libs import utils, gif, datasets, dataset_utils, nb_utils
except ImportError as e:
print("Make sure you have started notebook in the same directory",
"as the provided zip file which includes the 'libs' folder",
"and the file 'utils.py' inside of it. You will NOT be able",
"to complete this assignment unless you restart jupyter",
"notebook inside the directory created by extracting",
"the zip file or cloning the github repo.")
print(e)
# We'll tell matplotlib to inline any drawn figures like so:
%matplotlib inline
plt.style.use('ggplot')
# Bit of formatting because I don't like the default inline code style:
from IPython.core.display import HTML
HTML("""<style> .rendered_html code {
    padding: 2px 4px;
    color: #c7254e;
    background-color: #f9f2f4;
    border-radius: 4px;
} </style>""")
import tensorflow as tf
from six.moves import urllib
script = 'http://www.awesomefilm.com/script/biglebowski.txt'
txts = []
f, _ = urllib.request.urlretrieve(script, script.split('/')[-1])
with open(f, 'r') as fp:
txt = fp.read()
txt[:100]
txt = "\n".join([txt_i.strip()
for txt_i in txt.replace('\t', '').split('\n')
if len(txt_i)])
len(txt)
vocab = list(set(txt))
vocab.sort()
print(len(vocab))
print(vocab)
from collections import OrderedDict
encoder = OrderedDict(zip(vocab, range(len(vocab))))
decoder = OrderedDict(zip(range(len(vocab)), vocab))
encoder
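# A small illustrative round trip (added example, assuming the characters
# below appear in the vocabulary): encode a few characters and decode back.
ids = [encoder[ch] for ch in "abc"]
print(ids, "".join(decoder[i] for i in ids))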
# Number of sequences in a mini batch
batch_size = 100
# Number of characters in a sequence
sequence_length = 50
# Number of cells in our LSTM layer
n_cells = 128
# Number of LSTM layers
n_layers = 3
# Total number of characters in the one-hot encoding
n_chars = len(vocab)
X = tf.placeholder(tf.int32, shape=[None, sequence_length], name='X')
# We'll have a placeholder for our true outputs
Y = tf.placeholder(tf.int32, shape=[None, sequence_length], name='Y')
# we first create a variable to take us from our one-hot representation to our LSTM cells
embedding = tf.get_variable("embedding", [n_chars, n_cells])
# And then use tensorflow's embedding lookup to look up the ids in X
Xs = tf.nn.embedding_lookup(embedding, X)
# The resulting lookups are concatenated into a dense tensor
print(Xs.get_shape().as_list())
with tf.name_scope('reslice'):
Xs = [tf.squeeze(seq, [1])
for seq in tf.split(Xs, sequence_length, 1)]
cells = tf.contrib.rnn.BasicLSTMCell(num_units=n_cells, state_is_tuple=True, forget_bias=1.0)
cells.state_size
initial_state = cells.zero_state(tf.shape(X)[0], tf.float32)
initial_state
cells = tf.contrib.rnn.MultiRNNCell(
[cells] * n_layers, state_is_tuple=True)
initial_state = cells.zero_state(tf.shape(X)[0], tf.float32)
initial_state
outputs, state = tf.contrib.rnn.static_rnn(cell=cells, inputs=Xs, initial_state=initial_state)
state
outputs
outputs_flat = tf.reshape(tf.concat(values=outputs, axis=1), [-1, n_cells])
outputs_flat
with tf.variable_scope('prediction'):
W = tf.get_variable(
"W",
shape=[n_cells, n_chars],
initializer=tf.random_normal_initializer(stddev=0.1))
b = tf.get_variable(
"b",
shape=[n_chars],
initializer=tf.random_normal_initializer(stddev=0.1))
# Find the output prediction of every single character in our minibatch
# we denote the pre-activation prediction, logits.
logits = tf.matmul(outputs_flat, W) + b
# We get the probabilistic version by calculating the softmax of this
probs = tf.nn.softmax(logits)
# And then we can find the index of maximum probability
Y_pred = tf.argmax(probs, 1)
with tf.variable_scope('loss'):
# Compute mean cross entropy loss for each output.
Y_true_flat = tf.reshape(tf.concat(values=Y, axis=1), [-1])
# logits are [batch_size x timesteps, n_chars] and
# Y_true_flat are [batch_size x timesteps]
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y_true_flat, logits=logits)
# Compute the mean over our `batch_size` x `timesteps` number of observations
mean_loss = tf.reduce_mean(loss)
with tf.name_scope('optimizer'):
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
gradients = []
clip = tf.constant(5.0, name="clip")
for grad, var in optimizer.compute_gradients(mean_loss):
gradients.append((tf.clip_by_value(grad, -clip, clip), var))
updates = optimizer.apply_gradients(gradients)
nb_utils.show_graph(tf.get_default_graph().as_graph_def())
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
cursor = 0
it_i = 0
while it_i < 500:
Xs, Ys = [], []
for batch_i in range(batch_size):
if (cursor + sequence_length) >= len(txt) - sequence_length - 1:
cursor = 0
Xs.append([encoder[ch]
for ch in txt[cursor:cursor + sequence_length]])
Ys.append([encoder[ch]
for ch in txt[cursor + 1: cursor + sequence_length + 1]])
cursor = (cursor + sequence_length)
Xs = np.array(Xs).astype(np.int32)
Ys = np.array(Ys).astype(np.int32)
loss_val, _ = sess.run([mean_loss, updates],
feed_dict={X: Xs, Y: Ys})
if it_i % 100 == 0:
print(it_i, loss_val)
if it_i % 500 == 0:
p = sess.run(probs, feed_dict={X: np.array(Xs[-1])[np.newaxis]})
ps = [np.random.choice(range(n_chars), p=p_i.ravel())
for p_i in p]
p = [np.argmax(p_i) for p_i in p]
if isinstance(txt[0], str):
print('original:', "".join(
[decoder[ch] for ch in Xs[-1]]))
print('synth(samp):', "".join(
[decoder[ch] for ch in ps]))
print('synth(amax):', "".join(
[decoder[ch] for ch in p]))
else:
print([decoder[ch] for ch in ps])
it_i += 1
tf.reset_default_graph()
with open('trump.txt', 'r') as fp:
txt = fp.read()
txt[:100]
words = set(txt.split(' '))
words
counts = {word_i: 0 for word_i in words}
for word_i in txt.split(' '):
counts[word_i] += 1
counts
[(word_i, counts[word_i]) for word_i in sorted(counts, key=counts.get, reverse=True)]
from libs import charrnn
ckpt_name = './trump.ckpt'
g = tf.Graph()
n_layers = 3
n_cells = 512
with tf.Session(graph=g) as sess:
model = charrnn.build_model(txt=txt,
batch_size=1,
sequence_length=1,
n_layers=n_layers,
n_cells=n_cells,
gradient_clip=10.0)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
if os.path.exists(ckpt_name):
saver.restore(sess, ckpt_name)
print("Model restored.")
nb_utils.show_graph(g.as_graph_def())
n_iterations = 100
curr_states = None
g = tf.Graph()
with tf.Session(graph=g) as sess:
model = charrnn.build_model(txt=txt,
batch_size=1,
sequence_length=1,
n_layers=n_layers,
n_cells=n_cells,
gradient_clip=10.0)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
if os.path.exists(ckpt_name):
saver.restore(sess, ckpt_name)
print("Model restored.")
# Get every tf.Tensor for the initial state
init_states = []
for s_i in model['initial_state']:
init_states.append(s_i.c)
init_states.append(s_i.h)
# Similarly, for every state after inference
final_states = []
for s_i in model['final_state']:
final_states.append(s_i.c)
final_states.append(s_i.h)
    # Let's start with a space character and see what comes out:
synth = [[encoder[' ']]]
for i in range(n_iterations):
# We'll create a feed_dict parameter which includes what to
# input to the network, model['X'], as well as setting
# dropout to 1.0, meaning no dropout.
feed_dict = {model['X']: [synth[-1]],
model['keep_prob']: 1.0}
# Now we'll check if we currently have a state as a result
# of a previous inference, and if so, add to our feed_dict
# parameter the mapping of the init_state to the previous
# output state stored in "curr_states".
if curr_states:
feed_dict.update(
{init_state_i: curr_state_i
for (init_state_i, curr_state_i) in
zip(init_states, curr_states)})
# Now we can infer and see what letter we get
p = sess.run(model['probs'], feed_dict=feed_dict)[0]
# And make sure we also keep track of the new state
curr_states = sess.run(final_states, feed_dict=feed_dict)
# Find the most likely character
p = np.argmax(p)
# Append to string
synth.append([p])
# Print out the decoded letter
print(model['decoder'][p], end='')
sys.stdout.flush()
curr_states = None
g = tf.Graph()
with tf.Session(graph=g) as sess:
model = charrnn.build_model(txt=txt,
batch_size=1,
sequence_length=1,
n_layers=n_layers,
n_cells=n_cells,
gradient_clip=10.0)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
if os.path.exists(ckpt_name):
saver.restore(sess, ckpt_name)
print("Model restored.")
# Get every tf.Tensor for the initial state
init_states = []
for s_i in model['initial_state']:
init_states.append(s_i.c)
init_states.append(s_i.h)
# Similarly, for every state after inference
final_states = []
for s_i in model['final_state']:
final_states.append(s_i.c)
final_states.append(s_i.h)
    # Let's start with a space character and see what comes out:
synth = [[encoder[' ']]]
for i in range(n_iterations):
# We'll create a feed_dict parameter which includes what to
# input to the network, model['X'], as well as setting
# dropout to 1.0, meaning no dropout.
feed_dict = {model['X']: [synth[-1]],
model['keep_prob']: 1.0}
# Now we'll check if we currently have a state as a result
# of a previous inference, and if so, add to our feed_dict
# parameter the mapping of the init_state to the previous
# output state stored in "curr_states".
if curr_states:
feed_dict.update(
{init_state_i: curr_state_i
for (init_state_i, curr_state_i) in
zip(init_states, curr_states)})
# Now we can infer and see what letter we get
p = sess.run(model['probs'], feed_dict=feed_dict)[0]
# And make sure we also keep track of the new state
curr_states = sess.run(final_states, feed_dict=feed_dict)
# Now instead of finding the most likely character,
# we'll sample with the probabilities of each letter
p = p.astype(np.float64)
p = np.random.multinomial(1, p.ravel() / p.sum())
p = np.argmax(p)
# Append to string
synth.append([p])
# Print out the decoded letter
print(model['decoder'][p], end='')
sys.stdout.flush()
temperature = 0.5
curr_states = None
g = tf.Graph()
with tf.Session(graph=g) as sess:
model = charrnn.build_model(txt=txt,
batch_size=1,
sequence_length=1,
n_layers=n_layers,
n_cells=n_cells,
gradient_clip=10.0)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
if os.path.exists(ckpt_name):
saver.restore(sess, ckpt_name)
print("Model restored.")
# Get every tf.Tensor for the initial state
init_states = []
for s_i in model['initial_state']:
init_states.append(s_i.c)
init_states.append(s_i.h)
# Similarly, for every state after inference
final_states = []
for s_i in model['final_state']:
final_states.append(s_i.c)
final_states.append(s_i.h)
    # Let's start with a space character and see what comes out:
synth = [[encoder[' ']]]
for i in range(n_iterations):
# We'll create a feed_dict parameter which includes what to
# input to the network, model['X'], as well as setting
# dropout to 1.0, meaning no dropout.
feed_dict = {model['X']: [synth[-1]],
model['keep_prob']: 1.0}
# Now we'll check if we currently have a state as a result
# of a previous inference, and if so, add to our feed_dict
# parameter the mapping of the init_state to the previous
# output state stored in "curr_states".
if curr_states:
feed_dict.update(
{init_state_i: curr_state_i
for (init_state_i, curr_state_i) in
zip(init_states, curr_states)})
# Now we can infer and see what letter we get
p = sess.run(model['probs'], feed_dict=feed_dict)[0]
# And make sure we also keep track of the new state
curr_states = sess.run(final_states, feed_dict=feed_dict)
# Now instead of finding the most likely character,
# we'll sample with the probabilities of each letter
p = p.astype(np.float64)
p = np.log(p) / temperature
p = np.exp(p) / np.sum(np.exp(p))
p = np.random.multinomial(1, p.ravel() / p.sum())
p = np.argmax(p)
# Append to string
synth.append([p])
# Print out the decoded letter
print(model['decoder'][p], end='')
sys.stdout.flush()
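# Illustrative aside (not part of the original notebook): the temperature
# trick used above can be demonstrated on a toy distribution. Lower
# temperatures sharpen the softmax toward the argmax; higher ones flatten it.
import numpy as np
def apply_temperature(p, temperature):
    # Same rescaling as in the sampling loop above: divide log-probs, renormalize.
    p = np.asarray(p, dtype=np.float64)
    p = np.exp(np.log(p) / temperature)
    return p / p.sum()
toy = np.array([0.5, 0.3, 0.15, 0.05])
for T in [0.1, 0.5, 1.0, 2.0]:
    print(T, apply_temperature(toy, T))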
prime = "obama"
temperature = 1.0
curr_states = None
n_iterations = 500
g = tf.Graph()
with tf.Session(graph=g) as sess:
model = charrnn.build_model(txt=txt,
batch_size=1,
sequence_length=1,
n_layers=n_layers,
n_cells=n_cells,
gradient_clip=10.0)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
if os.path.exists(ckpt_name):
saver.restore(sess, ckpt_name)
print("Model restored.")
# Get every tf.Tensor for the initial state
init_states = []
for s_i in model['initial_state']:
init_states.append(s_i.c)
init_states.append(s_i.h)
# Similarly, for every state after inference
final_states = []
for s_i in model['final_state']:
final_states.append(s_i.c)
final_states.append(s_i.h)
# Now we'll keep track of the state as we feed it one
# letter at a time.
curr_states = None
for ch in prime:
feed_dict = {model['X']: [[model['encoder'][ch]]],
model['keep_prob']: 1.0}
if curr_states:
feed_dict.update(
{init_state_i: curr_state_i
for (init_state_i, curr_state_i) in
zip(init_states, curr_states)})
# Now we can infer and see what letter we get
p = sess.run(model['probs'], feed_dict=feed_dict)[0]
p = p.astype(np.float64)
p = np.log(p) / temperature
p = np.exp(p) / np.sum(np.exp(p))
p = np.random.multinomial(1, p.ravel() / p.sum())
p = np.argmax(p)
# And make sure we also keep track of the new state
curr_states = sess.run(final_states, feed_dict=feed_dict)
# Now we're ready to do what we were doing before but with the
# last predicted output stored in `p`, and the current state of
# the model.
synth = [[p]]
print(prime + model['decoder'][p], end='')
for i in range(n_iterations):
# Input to the network
feed_dict = {model['X']: [synth[-1]],
model['keep_prob']: 1.0}
# Also feed our current state
feed_dict.update(
{init_state_i: curr_state_i
for (init_state_i, curr_state_i) in
zip(init_states, curr_states)})
# Inference
p = sess.run(model['probs'], feed_dict=feed_dict)[0]
# Keep track of the new state
curr_states = sess.run(final_states, feed_dict=feed_dict)
# Sample
p = p.astype(np.float64)
p = np.log(p) / temperature
p = np.exp(p) / np.sum(np.exp(p))
p = np.random.multinomial(1, p.ravel() / p.sum())
p = np.argmax(p)
# Append to string
synth.append([p])
# Print out the decoded letter
print(model['decoder'][p], end='')
sys.stdout.flush()
utils.build_submission('session-5.zip',
('vaegan.gif',
'session-5-part-1.ipynb',
'session-5-part-2.ipynb'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Connect to Cloud Analytic Services in SAS Viya
Step2: Load the action set for hypergroup
Step3: Load data into CAS
Step4: Hypergroup doesn't support numeric source and target columns - so make sure to cast them as varchars.
Step5: Data Exploration
Step6: Graph rendering utility
Step7: Execute community and hypergroup detection
Step8: Note
Step9: How many hypergroups and communities do we have?
Step10: Basic community analysis
Step11: Note
Step12: What edges do we have?
Step13: Render the network graph
Step14: Analyze node centrality
Step15: Between-ness centrality quantifies the number of times a node acts as a bridge along the shortest path(s) between two other nodes. As such it describes the importance of a node in a network.
Step16: Filter communities
|
<ASSISTANT_TASK:>
Python Code:
import swat
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
# Also import networkx used for rendering a network
import networkx as nx
%matplotlib inline
s = swat.CAS('http://cas.mycompany.com:8888') # REST API
s.loadactionset('hypergroup')
df = pd.DataFrame.from_records([[2,1],[3,1],[3,2],[4,1],[4,2],[4,3],[5,1],[6,1],[7,1],[7,5],[7,6],[8,1],[8,2],[8,3],[8,4],[9,1],[9,3],[10,3],[11,1],[11,5],[11,6],[12,1],[13,1],[13,4],[14,1],[14,2],[14,3],[14,4],[17,6],[17,7],[18,1],[18,2],[20,1],[20,2],[22,1],[22,2],[26,24],[26,25],[28,3],[28,24],[28,25],[29,3],[30,24],[30,27],[31,2],[31,9],[32,1],[32,25],[32,26],[32,29],[33,3],[33,9],[33,15],[33,16],[33,19],[33,21],[33,23],[33,24],[33,30],[33,31],[33,32],[34,9],[34,10],[34,14],[34,15],[34,16],[34,19],[34,20],[34,21],[34,23],[34,24],[34,27],[34,28],[34,29],[34,30],[34,31],[34,32],[34,33]],
columns=['FROM','TO'])
df['SOURCE'] = df['FROM'].astype(str)
df['TARGET'] = df['TO'].astype(str)
df.head()
if s.tableexists('karate').exists:
s.CASTable('KARATE').droptable()
dataset = s.upload(df,
importoptions=dict(filetype='csv',
vars=[dict(type='double'),
dict(type='double'),
dict(type='varchar'),
dict(type='varchar')]),
casout=dict(name='KARATE', promote=True)).casTable
dataset.head(5)
dataset.summary()
def renderNetworkGraph(filterCommunity=-1, size=18, sizeVar='_HypGrp_',
colorVar='', sizeMultipler=500, nodes_table='nodes',
edges_table='edges'):
''' Build an array of node positions and related colors based on community '''
nodes = s.CASTable(nodes_table)
if filterCommunity >= 0:
nodes = nodes.query('_Community_ EQ %F' % filterCommunity)
nodes = nodes.to_frame()
nodePos = {}
nodeColor = {}
nodeSize = {}
communities = []
i = 0
for nodeId in nodes._Value_:
nodePos[nodeId] = (nodes._AllXCoord_[i], nodes._AllYCoord_[i])
if colorVar:
nodeColor[nodeId] = nodes[colorVar][i]
if nodes[colorVar][i] not in communities:
communities.append(nodes[colorVar][i])
nodeSize[nodeId] = max(nodes[sizeVar][i],0.1)*sizeMultipler
i += 1
communities.sort()
# Build a list of source-target tuples
edges = s.CASTable(edges_table)
if filterCommunity >= 0:
edges = edges.query('_SCommunity_ EQ %F AND _TCommunity_ EQ %F' %
(filterCommunity, filterCommunity))
edges = edges.to_frame()
edgeTuples = []
for i, p in enumerate(edges._Source_):
edgeTuples.append( (edges._Source_[i], edges._Target_[i]) )
# Add nodes and edges to the graph
plt.figure(figsize=(size,size))
graph = nx.DiGraph()
graph.add_edges_from(edgeTuples)
# Size mapping
getNodeSize=[nodeSize[v] for v in graph]
# Color mapping
jet = cm = plt.get_cmap('jet')
getNodeColor=None
if colorVar:
getNodeColor=[nodeColor[v] for v in graph]
cNorm = colors.Normalize(vmin=min(communities), vmax=max(communities))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
# Using a figure here to work-around the fact that networkx doesn't
# produce a labelled legend
f = plt.figure(1)
ax = f.add_subplot(1,1,1)
for community in communities:
ax.plot([0],[0], color=scalarMap.to_rgba(community),
label='Community %s' % '{:2.0f}'.format(community), linewidth=10)
# Render the graph
nx.draw_networkx_nodes(graph, nodePos, node_size=getNodeSize,
node_color=getNodeColor, cmap=jet)
nx.draw_networkx_edges(graph, nodePos, width=1, alpha=0.5)
nx.draw_networkx_labels(graph, nodePos, font_size=11, font_family='sans-serif')
if len(communities) > 0:
plt.legend(loc='upper left', prop={'size':11})
plt.title('Zachary Karate Club social network', fontsize=30)
plt.axis('off')
plt.show()
# Create output table objects
edges = s.CASTable('edges', replace=True)
nodes = s.CASTable('nodes', replace=True)
dataset[['SOURCE', 'TARGET']].hyperGroup(
createOut = 'never',
allGraphs = True,
edges = edges,
vertices = nodes
)
renderNetworkGraph(size=10, sizeMultipler=2000)
dataset[['SOURCE', 'TARGET']].hyperGroup(
createOut = 'never',
allGraphs = True,
community = True,
edges = edges,
vertices = nodes
)
nodes.distinct()
nodes.summary()
topKOut = s.CASTable('topKOut', replace=True)
nodes[['_Community_']].topk(
aggregator = 'N',
topK = 4,
casOut = topKOut
)
topKOut = topKOut.sort_values('_Rank_').head(10)
topKOut.columns
nCommunities = len(topKOut)
ind = np.arange(nCommunities) # the x locations for the groups
plt.figure(figsize=(8,4))
p1 = plt.bar(ind + 0.2, topKOut._Score_, 0.5, color='orange', alpha=0.75)
plt.ylabel('Vertices', fontsize=12)
plt.xlabel('Community', fontsize=12)
plt.title('Number of nodes for the top %s communities' % '{:2.0f}'.format(nCommunities))
plt.xticks(ind + 0.2, topKOut._Fmtvar_)
plt.show()
nodes.query('_Community_ EQ 1').head(5)
edges.head(5)
renderNetworkGraph(size=10, colorVar='_Community_', sizeMultipler=2000)
dataset[['SOURCE', 'TARGET']].hyperGroup(
createOut = 'never',
community = True,
centrality = True,
mergeCommSmallest = True,
allGraphs = True,
graphPartition = True,
scaleCentralities = 'central1', # Returns centrality values closer to 1 in the center
edges = edges,
vertices = nodes
)
nodes.head()
renderNetworkGraph(size=10, colorVar='_Community_', sizeVar='_Betweenness_')
renderNetworkGraph(1, size=10, sizeVar='_CentroidAngle_', sizeMultipler=5)
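# Optional local check (an illustrative addition, assuming the hypergroup
# 'edges' output exposes _Source_ and _Target_ columns, as used in
# renderNetworkGraph above): compute node degrees with pandas and list the
# five best-connected members of the club.
edges_df = edges.to_frame()
degrees = pd.concat([edges_df['_Source_'], edges_df['_Target_']]).value_counts()
print(degrees.head(5))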
s.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note that the items are presented as tuples representing the key-value pairs
|
<ASSISTANT_TASK:>
Python Code:
# Creating an empty dictionary
d = {}
# Adding key-value elements
d['a'] = 'alpha'
d['o'] = 'omega'
d['g'] = 'gamma'
# some useful properties
d
# Displaying the keys
d.keys()
# Iterating over the keys
for k in d.keys(): print('Key:', k, '->', d[k])
# Displaying the values
d.values()
# Displaying the items
d.items()
# Iterating over the tuples that represent the key-value pairs
for tup in d.items(): print(tup)
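# Two related idioms (illustrative additions): dict.get returns a default
# instead of raising KeyError for a missing key, and items() can be unpacked
# directly in the loop header.
print(d.get('z', 'not found'))
for k, v in d.items():
    print(k, '->', v)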
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some metadata is required before registration
Step2: Load the sample data
Step3: Preprocessing
Step4: Registration
Step5: Visualize registered image
Step6: Quantitative evaluation
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib
import ndreg
from ndreg import preprocessor, util, plotter
import SimpleITK as sitk
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)
def myshow(img, cmap='gray', colorbar=False):
plt.imshow(sitk.GetArrayViewFromImage(img), cmap=cmap)
if colorbar: plt.colorbar()
plt.axis('off')
plt.show()
params = {
# input image path
'image_path': './Thy1eYFP_Control_9.tiff',
# voxel spacing is in mm and corresponds to (x, y, z) spacing
'image_spacing': (0.04128, 0.04128, 0.04128),
'image_orientation': 'rpi',
# the modality can be 'lavision' or 'colm'
'image_modality': 'lavision',
'atlas_spacing': (0.05, 0.05, 0.05),
'atlas_path': './ARA_50um.tiff',
}
img = util.imgRead(params['image_path'])
img.SetSpacing(params['image_spacing'])
atlas = util.imgRead(params['atlas_path'])
atlas.SetSpacing(params['atlas_spacing'])
plotter.imgShow(img, vmax=2000)
plotter.imgShow(atlas, vmax=400)
img_p = preprocessor.preprocess_brain(img,
params['atlas_spacing'],
params['image_modality'],
params['image_orientation'])
atlas_registered = ndreg.register_brain(atlas, img_p)
plotter.imgShow(atlas_registered)
plotter.imgShow(plotter.imgChecker(atlas_registered, img_p), vmax=2)
ndreg.imgMSE(sitk.Normalize(atlas), sitk.Normalize(img))
ndreg.imgMSE(sitk.Normalize(atlas), sitk.Normalize(img_p))
ndreg.imgMSE(sitk.Normalize(atlas_registered), sitk.Normalize(img_p))
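# Besides MSE, a correlation-based similarity can be computed directly on the
# voxel arrays. This is an illustrative sketch (numpy is assumed available;
# it is not imported above), not part of the ndreg API used in this notebook.
import numpy as np
a = sitk.GetArrayViewFromImage(sitk.Normalize(atlas_registered)).ravel()
b = sitk.GetArrayViewFromImage(sitk.Normalize(img_p)).ravel()
print(np.corrcoef(a, b)[0, 1])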
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lamé parameters
Step2: Metric tensor
Step3: ${\displaystyle \hat{G}=\sum_{i,j} g_{ij}\vec{R}^i\vec{R}^j}$
Step4: Christoffel symbols
Step5: Gradient of vector
Step6: Physical coordinates
Step7: Strain tensor
Step8: Virtual work
Step9: Timoshenko theory
Step10: Square theory
Step11: Mass matrix
|
<ASSISTANT_TASK:>
Python Code:
from sympy import *
from geom_util import *
from sympy.vector import CoordSys3D
N = CoordSys3D('N')
alpha1, alpha2, alpha3 = symbols("alpha_1 alpha_2 alpha_3", real = True, positive=True)
init_printing()
%matplotlib inline
%reload_ext autoreload
%autoreload 2
%aimport geom_util
H1=symbols('H1')
H2=S(1)
H3=S(1)
H=[H1, H2, H3]
DIM=3
dH = zeros(DIM,DIM)
for i in range(DIM):
for j in range(DIM):
if (i == 0 and j != 1):
dH[i,j]=Symbol('H_{{{},{}}}'.format(i+1,j+1))
dH
G_up = getMetricTensorUpLame(H1, H2, H3)
G_down = getMetricTensorDownLame(H1, H2, H3)
DIM=3
G_down_diff = MutableDenseNDimArray.zeros(DIM, DIM, DIM)
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
G_down_diff[i,i,k]=2*H[i]*dH[i,k]
GK = getChristoffelSymbols2(G_up, G_down_diff, (alpha1, alpha2, alpha3))
GK
def row_index_to_i_j_grad(i_row):
return i_row // 3, i_row % 3
B = zeros(9, 12)
B[0,1] = S(1)
B[1,2] = S(1)
B[2,3] = S(1)
B[3,5] = S(1)
B[4,6] = S(1)
B[5,7] = S(1)
B[6,9] = S(1)
B[7,10] = S(1)
B[8,11] = S(1)
for row_index in range(9):
i,j=row_index_to_i_j_grad(row_index)
B[row_index, 0] = -GK[i,j,0]
B[row_index, 4] = -GK[i,j,1]
B[row_index, 8] = -GK[i,j,2]
B
P=zeros(12,12)
P[0,0]=H[0]
P[1,0]=dH[0,0]
P[1,1]=H[0]
P[2,0]=dH[0,1]
P[2,2]=H[0]
P[3,0]=dH[0,2]
P[3,3]=H[0]
P[4,4]=H[1]
P[5,4]=dH[1,0]
P[5,5]=H[1]
P[6,4]=dH[1,1]
P[6,6]=H[1]
P[7,4]=dH[1,2]
P[7,7]=H[1]
P[8,8]=H[2]
P[9,8]=dH[2,0]
P[9,9]=H[2]
P[10,8]=dH[2,1]
P[10,10]=H[2]
P[11,8]=dH[2,2]
P[11,11]=H[2]
P=simplify(P)
P
B_P = zeros(9,9)
for i in range(3):
for j in range(3):
row_index = i*3+j
B_P[row_index, row_index] = 1/(H[i]*H[j])
Grad_U_P = simplify(B_P*B*P)
Grad_U_P
E=zeros(6,9)
E[0,0]=1
E[1,4]=1
E[2,8]=1
E[3,1]=1
E[3,3]=1
E[4,2]=1
E[4,6]=1
E[5,5]=1
E[5,7]=1
E
StrainL=simplify(E*Grad_U_P)
StrainL
def E_NonLinear(grad_u):
N = 3
du = zeros(N, N)
# print("===Deformations===")
for i in range(N):
for j in range(N):
index = i*N+j
du[j,i] = grad_u[index]
# print("========")
I = eye(3)
a_values = S(1)/S(2) * du * G_up
E_NL = zeros(6,9)
E_NL[0,0] = a_values[0,0]
E_NL[0,3] = a_values[0,1]
E_NL[0,6] = a_values[0,2]
E_NL[1,1] = a_values[1,0]
E_NL[1,4] = a_values[1,1]
E_NL[1,7] = a_values[1,2]
E_NL[2,2] = a_values[2,0]
E_NL[2,5] = a_values[2,1]
E_NL[2,8] = a_values[2,2]
E_NL[3,1] = 2*a_values[0,0]
E_NL[3,4] = 2*a_values[0,1]
E_NL[3,7] = 2*a_values[0,2]
E_NL[4,0] = 2*a_values[2,0]
E_NL[4,3] = 2*a_values[2,1]
E_NL[4,6] = 2*a_values[2,2]
E_NL[5,2] = 2*a_values[1,0]
E_NL[5,5] = 2*a_values[1,1]
E_NL[5,8] = 2*a_values[1,2]
return E_NL
%aimport geom_util
u=getUHat3DPlane(alpha1, alpha2, alpha3)
# u=getUHatU3Main(alpha1, alpha2, alpha3)
gradu=B*u
E_NL = E_NonLinear(gradu)*B
E_NL
%aimport geom_util
u=getUHatU3MainPlane(alpha1, alpha2, alpha3)
gradup=Grad_U_P*u
# e=E*gradup
# e
E_NLp = E_NonLinear(gradup)*gradup
simplify(E_NLp)
%aimport geom_util
C_tensor = getIsotropicStiffnessTensor()
C = convertStiffnessTensorToMatrix(C_tensor)
C
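# Quick structural check (an illustrative addition): in Voigt notation an
# isotropic stiffness matrix must be symmetric, which sympy confirms directly.
assert simplify(C - C.T) == zeros(6, 6)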
StrainL.T*C*StrainL*H1
T=zeros(12,6)
T[0,0]=1
T[0,2]=alpha3
T[1,1]=1
T[1,3]=alpha3
T[3,2]=1
T[8,4]=1
T[9,5]=1
T
D_p_T = StrainL*T
simplify(D_p_T)
u = Function("u")
t = Function("theta")
w = Function("w")
u1=u(alpha1)+alpha3*t(alpha1)
u3=w(alpha1)
gu = zeros(12,1)
gu[0] = u1
gu[1] = u1.diff(alpha1)
gu[3] = u1.diff(alpha3)
gu[8] = u3
gu[9] = u3.diff(alpha1)
gradup=Grad_U_P*gu
# E_NLp = E_NonLinear(gradup)*gradup
# simplify(E_NLp)
# gradup=Grad_U_P*gu
# o20=(K*u(alpha1)-w(alpha1).diff(alpha1)+t(alpha1))/2
# o21=K*t(alpha1)
# O=1/2*o20*o20+alpha3*o20*o21-alpha3*K/2*o20*o20
# O=expand(O)
# O=collect(O,alpha3)
# simplify(O)
StrainNL = E_NonLinear(gradup)*gradup
StrainL*gu+simplify(StrainNL)
L=zeros(12,12)
h=Symbol('h')
p0=1/2-alpha3/h
p1=1/2+alpha3/h
p2=1-(2*alpha3/h)**2
L[0,0]=p0
L[0,2]=p1
L[0,4]=p2
L[1,1]=p0
L[1,3]=p1
L[1,5]=p2
L[3,0]=p0.diff(alpha3)
L[3,2]=p1.diff(alpha3)
L[3,4]=p2.diff(alpha3)
L[8,6]=p0
L[8,8]=p1
L[8,10]=p2
L[9,7]=p0
L[9,9]=p1
L[9,11]=p2
L[11,6]=p0.diff(alpha3)
L[11,8]=p1.diff(alpha3)
L[11,10]=p2.diff(alpha3)
L
D_p_L = StrainL*L
simplify(D_p_L)
h = 0.5
exp=(0.5-alpha3/h)*(1-(2*alpha3/h)**2)#/(1+alpha3*0.8)
p02=integrate(exp, (alpha3, -h/2, h/2))
integral = expand(simplify(p02))
integral
rho=Symbol('rho')
B_h=zeros(3,12)
B_h[0,0]=1
B_h[1,4]=1
B_h[2,8]=1
M=simplify(rho*P.T*B_h.T*G_up*B_h*P)
M
M_p = L.T*M*L
integrate(M_p, (alpha3, -h/2, h/2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the data
Step2: Two-sample tests for independent samples
Step3: Mann-Whitney rank test
Step4: Permutation test
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import itertools
from scipy import stats
from statsmodels.stats.descriptivestats import sign_test
from statsmodels.stats.weightstats import zconfint
from statsmodels.stats.weightstats import *
%pylab inline
seattle_data = pd.read_csv('seattle.txt', sep = '\t', header = 0)
seattle_data.shape
seattle_data.head()
price2001 = seattle_data[seattle_data['Year'] == 2001].Price
price2002 = seattle_data[seattle_data['Year'] == 2002].Price
pylab.figure(figsize=(12,4))
pylab.subplot(1,2,1)
pylab.grid()
pylab.hist(price2001, color = 'r')
pylab.xlabel('2001')
pylab.subplot(1,2,2)
pylab.grid()
pylab.hist(price2002, color = 'b')
pylab.xlabel('2002')
pylab.show()
print '95%% confidence interval for the mean: [%f, %f]' % zconfint(price2001)
print '95%% confidence interval for the mean: [%f, %f]' % zconfint(price2002)
stats.mannwhitneyu(price2001, price2002)
def permutation_t_stat_ind(sample1, sample2):
return np.mean(sample1) - np.mean(sample2)
def get_random_combinations(n1, n2, max_combinations):
index = range(n1 + n2)
indices = set([tuple(index)])
for i in range(max_combinations - 1):
np.random.shuffle(index)
indices.add(tuple(index))
return [(index[:n1], index[n1:]) for index in indices]
def permutation_zero_dist_ind(sample1, sample2, max_combinations = None):
joined_sample = np.hstack((sample1, sample2))
n1 = len(sample1)
n = len(joined_sample)
if max_combinations:
indices = get_random_combinations(n1, len(sample2), max_combinations)
else:
indices = [(list(index), filter(lambda i: i not in index, range(n))) \
for index in itertools.combinations(range(n), n1)]
distr = [joined_sample[list(i[0])].mean() - joined_sample[list(i[1])].mean() \
for i in indices]
return distr
pylab.hist(permutation_zero_dist_ind(price2001, price2002, max_combinations = 1000))
pylab.show()
def permutation_test(sample1, sample2, max_permutations = None, alternative = 'two-sided'):
    if alternative not in ('two-sided', 'less', 'greater'):
        raise ValueError("alternative not recognized\n"
                         "should be 'two-sided', 'less' or 'greater'")
    t_stat = permutation_t_stat_ind(sample1, sample2)
    zero_distr = permutation_zero_dist_ind(sample1, sample2, max_permutations)
if alternative == 'two-sided':
return sum([1. if abs(x) >= abs(t_stat) else 0. for x in zero_distr]) / len(zero_distr)
if alternative == 'less':
return sum([1. if x <= t_stat else 0. for x in zero_distr]) / len(zero_distr)
if alternative == 'greater':
return sum([1. if x >= t_stat else 0. for x in zero_distr]) / len(zero_distr)
print "p-value: %f" % permutation_test(price2001, price2002, max_permutations = 10000)
print "p-value: %f" % permutation_test(price2001, price2002, max_permutations = 50000)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Date Data
Step2: Lag Time Data By One Row
|
<ASSISTANT_TASK:>
Python Code:
# Load library
import pandas as pd
# Create data frame
df = pd.DataFrame()
# Create data
df['dates'] = pd.date_range('1/1/2001', periods=5, freq='D')
df['stock_price'] = [1.1,2.2,3.3,4.4,5.5]
# Lagged values by one row
df['previous_days_stock_price'] = df['stock_price'].shift(1)
# Show data frame
df
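# Illustrative extensions: a negative shift gives a lead (the next day's
# value), and combining it with the original column yields day-over-day change.
df['next_days_stock_price'] = df['stock_price'].shift(-1)
df['day_over_day_change'] = df['stock_price'] - df['previous_days_stock_price']
df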
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lists
Step2: Exercise 2
Step3: But beware: type annotations are always optional in Python
Step4: Exercise 3
Step5: Our implementation is obviously slower than the standard library's x in liste test...
Step6: Exercise 4
Step7: Exercise 5
Step8: The complexity is linear, in $\mathcal{O}(\max(|\text{list 1}|, |\text{list 2}|))$.
Step9: Exercise 7
Step10: Exercise 8
Step11: Exercise 9
Step12: A purely functional version is less straightforward than an imperative version with a boolean reference.
Step14: Singly linked lists (defined by hand)
Step15: Exercise 1
Step16: Exercise 2
Step17: We can check that this works by looking, for instance, at the ids of two objects when the second is a copy of the first
Step18: And so concatenating two linked lists is easy
Step19: Exercise 3
Step20: Exercise 4
Step21: Exercise 5
Step22: The complexity is quadratic, in $\mathcal{O}(\max(|\text{list 1}|, |\text{list 2}|)^2)$, because of the copies.
Step23: We can easily write a variant that is tail recursive
Step24: Exercise 7
Step25: And so it is fast
Step26: Exercise 8
Step27: If we want them in increasing order, we would have to use miroir, which is quadratic.
Step28: Another approach is to write the function mymap and to say that
Step29: Exercise 9
Step30: A purely functional version is less straightforward than an imperative version with a boolean reference.
Step31: We are ready to write estPremier
Step32: Indeed, it suffices to first build the list of integers from 2 to $\lfloor \sqrt{n} \rfloor$, filter those that divide $n$, and check whether there is no divisor (taille(..) == 0), in which case $n$ is prime, or whether $n$ has at least one divisor, in which case $n$ is not prime.
Step33: In the example above we see the prime numbers as those having no divisor, and the non-prime numbers as those having at least one divisor.
Step34: A few comparison-based sorts
Step35: Exercise 10
Step36: Time complexity $\mathcal{O}(n^2)$.
Step37: Exercise 12
Step38: (We see that the list autre has been reversed)
Step39: Time complexity
Step40: Time complexity $\mathcal{O}(n \log n)$.
Step41: This is enough to check that merge sort is much more efficient than the others.
Step42: Exercise 17
Step43: Exercise 18
Step44: Exercise 19
Step45: Exercise 20
Step46: Exercise 21
Step47: Can our implementation be faster than the x in liste test?
Step48: Exercise 22
Step49: Exercise 23
Step50: Exercise 24
Step51: Very handy for computing sums, in particular.
Step52: For small lists, the recursive version is as efficient as the imperative one. Nice!
Step53: Bonus
Step54: Exercise 26
Step55: Careful: in Python, lists are NOT singly linked, so lambda l, x
Step56: With an improved syntax, we get very close to the Caml/OCaml syntax
Step57: Exercise 28
Step58: Exercise 29
Step59: Exercise 30
Step60: Exercise 32
Step61: Why do they have quadratic complexity? Concatenation (@ in OCaml, + in Python) does not run in constant time but in time linear in the length of the longer list.
Step62: Exercise 34
Step63: Replacing the queue by a stack (a plain list) gives the depth-first traversal, with the same complexity.
Step64: Exercise 35 and the end
Step65: The idea of this solution is the following
Step66: And this example will fail
Step67: Reconstruction from the breadth-first traversal
Step68: The same algorithm (well, almost, up to swapping g and d)
|
<ASSISTANT_TASK:>
Python Code:
from sys import version
print(version)
from typing import TypeVar, List
_a = TypeVar('alpha')
def taille(liste : List[_a]) -> int:
longueur = 0
for _ in liste:
longueur += 1
return longueur
taille([])
taille([1, 2, 3])
len([])
len([1, 2, 3])
from typing import TypeVar, List
_a = TypeVar('alpha')
def concatene(liste1 : List[_a], liste2 : List[_a]) -> List[_a]:
# return liste1 + liste2 # easy solution
liste = []
for i in liste1:
liste.append(i)
for i in liste2:
liste.append(i)
return liste
concatene([1, 2], [3, 4])
[1, 2] + [3, 4]
concatene([1, 2], ["pas", "entier", "?"])
from typing import TypeVar, List
_a = TypeVar('alpha')
def appartient(x : _a, liste : List[_a]) -> bool:
for y in liste:
if x == y:
            return True # stop before the end
return False
appartient(1, [])
appartient(1, [1])
appartient(1, [1, 2, 3])
appartient(4, [1, 2, 3])
1 in []
1 in [1]
1 in [1, 2, 3]
4 in [1, 2, 3]
%timeit appartient(1000, list(range(10000)))
%timeit 1000 in list(range(10000))
from typing import TypeVar, List
_a = TypeVar('alpha')
def miroir(liste : List[_a]) -> List[_a]:
    # return liste[::-1]  # the easy version
liste2 = []
for x in liste:
liste2.insert(0, x)
return liste2
miroir([2, 3, 5, 7, 11])
[2, 3, 5, 7, 11][::-1]
%timeit miroir([2, 3, 5, 7, 11])
%timeit [2, 3, 5, 7, 11][::-1]
from typing import TypeVar, List
_a = TypeVar('alpha')
def alterne(liste1 : List[_a], liste2 : List[_a]) -> List[_a]:
liste3 = []
i, j = 0, 0
n, m = len(liste1), len(liste2)
while i < n and j < m: # encore deux
liste3.append(liste1[i])
i += 1
liste3.append(liste2[j])
j += 1
while i < n: # si n > m
liste3.append(liste1[i])
i += 1
while j < m: # ou si n < m
liste3.append(liste2[j])
j += 1
return liste3
alterne([3, 5], [2, 4, 6])
alterne([1, 3, 5], [2, 4, 6])
alterne([1, 3, 5], [4, 6])
from typing import TypeVar, List
_a = TypeVar('alpha')
def nb_occurrences(x : _a, liste : List[_a]) -> int:
nb = 0
for y in liste:
if x == y:
nb += 1
return nb
nb_occurrences(0, [1, 2, 3, 4])
nb_occurrences(2, [1, 2, 3, 4])
nb_occurrences(2, [1, 2, 2, 3, 2, 4])
nb_occurrences(5, [1, 2, 3, 4])
filter?
from typing import List
def pairs(liste : List[int]) -> List[int]:
# return list(filter(lambda x : x % 2 == 0, liste))
return [x for x in liste if x % 2 == 0]
pairs([1, 2, 3, 4, 5, 6])
pairs([1, 2, 3, 4, 5, 6, 7, 100000])
pairs([1, 2, 3, 4, 5, 6, 7, 100000000000])
pairs([1, 2, 3, 4, 5, 6, 7, 1000000000000000000])
from typing import List
def myrange(n : int) -> List[int]:
liste = []
i = 1
while i <= n:
liste.append(i)
i += 1
return liste
myrange(4)
from typing import List
def intervale(a : int, b : int=None) -> List[int]:
if b == None:
a, b = 1, a
liste = []
i = a
while i <= b:
liste.append(i)
i += 1
return liste
intervale(10)
intervale(1, 4)
def racine(n : int) -> int:
i = 1
for i in range(n + 1):
if i*i > n:
return i - 1
return i
racine(1)
racine(5)
racine(102)
racine(120031)
from typing import List
def intervale2(a : int, b : int, pas : int=1) -> List[int]:
assert pas > 0
liste = []
i = a
while i <= b:
liste.append(i)
i += pas
return liste
intervale2(2, 12, 1)
intervale2(2, 12, 3)
def estDivisible(n : int, k : int) -> bool:
return (n % k) == 0
estDivisible(10, 2)
estDivisible(10, 3)
estDivisible(10, 4)
estDivisible(10, 5)
def estPremier(n : int) -> bool:
    return n >= 2 and not any(map(lambda k: estDivisible(n, k), intervale2(2, racine(n), 1)))
for n in range(2, 20):
print(n, list(map(lambda k: estDivisible(n, k), intervale2(2, racine(n), 1))))
from typing import List
def premiers(n : int) -> List[int]:
return [p for p in intervale2(2, n, 1) if estPremier(p)]
premiers(10)
premiers(100)
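# Cross-check (an illustrative addition): a classic sieve of Eratosthenes
# should agree with the filter-based premiers above.
def sieve(n : int) -> List[int]:
    est_premier = [True] * (n + 1)
    for k in range(2, racine(n) + 1):
        if est_premier[k]:
            for m in range(k * k, n + 1, k):
                est_premier[m] = False
    return [k for k in range(2, n + 1) if est_premier[k]]
assert sieve(100) == premiers(100)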
class ListeChainee():
def __init__(self, hd=None, tl=None):
self.hd = hd
self.tl = tl
def __repr__(self) -> str:
if self.tl is None:
if self.hd is None:
return "[]"
else:
return f"{self.hd} :: []"
else:
return f"{self.hd} :: {self.tl}"
def jolie(self) -> str:
if self.tl is None:
if self.hd is None:
return "[]"
else:
return f"[{self.hd}]"
else:
j = self.tl.jolie()
j = j.replace("[", "").replace("]", "")
if j == "":
return f"[{self.hd}]"
else:
return f"[{self.hd}, {j}]"
# equivalent to :: in OCaml
def insert(hd, tl: ListeChainee) -> ListeChainee:
    """Insert hd at the head of the linked list tl."""
    return ListeChainee(hd=hd, tl=tl)
# empty list, then some larger lists
vide = ListeChainee() # []
l_1 = insert(1, vide) # 1 :: [] ~= [1]
l_12 = insert(2, l_1) # 2 :: 1 :: [] ~= [2, 1]
l_123 = insert(3, l_12) # 3 :: 2 :: 1 :: []
print(vide) # []
print(l_1) # 1 :: []
print(l_12) # 2 :: 1 :: []
print(l_123) # 3 :: 2 :: 1 :: []
print(vide.jolie()) # []
print(l_1.jolie()) # [1]
print(l_12.jolie()) # [2, 1]
print(l_123.jolie()) # [3, 2, 1]
from typing import Optional
def taille(liste: Optional[ListeChainee]) -> int:
if liste is None:
return 0
elif liste.tl is None:
return 0 if liste.hd is None else 1
return 1 + taille(liste.tl)
print(taille(vide)) # 0
print(taille(l_1)) # 1
print(taille(l_12)) # 2
print(taille(l_123)) # 3
def copy(liste: ListeChainee) -> ListeChainee:
if liste.tl is None:
return ListeChainee(hd=liste.hd, tl=None)
else:
return ListeChainee(hd=liste.hd, tl=copy(liste.tl))
print(id(vide))
print(id(copy(vide)))
def concat(liste1: ListeChainee, liste2: ListeChainee) -> ListeChainee:
if taille(liste1) == 0:
return liste2
elif taille(liste2) == 0:
return liste1
    # new list: this way, changing queue.tl does NOT modify liste1
resultat = copy(liste1)
queue = resultat
while taille(queue.tl) > 0:
queue = queue.tl
assert taille(queue.tl) == 0
queue.tl = ListeChainee(hd=liste2.hd, tl=liste2.tl)
return resultat
print(concat(vide, l_1))
print(vide) # not modified: []
print(l_1) # not modified: 1 :: []
concat(l_1, l_12) # 1 :: 2 :: 1 :: []
concat(l_1, l_123) # 1 :: 3 :: 2 :: 1 :: []
concat(l_1, vide) # 1 :: []
concat(l_12, vide) # 2 :: 1 :: []
concat(l_12, l_1) # 2 :: 1 :: 1 :: []
concat(l_123, l_123) # 3 :: 2 :: 1 :: 3 :: 2 :: 1 :: []
def appartient(x, liste: ListeChainee) -> bool:
if liste.hd is None:
return False
else:
if liste.hd == x:
return True
else:
return appartient(x, liste.tl)
assert appartient(0, vide) == False
assert appartient(0, l_1) == False
assert appartient(0, l_12) == False
assert appartient(0, l_123) == False
assert appartient(1, l_1) == True
assert appartient(1, l_12) == True
assert appartient(1, l_123) == True
assert appartient(2, l_1) == False
assert appartient(2, l_12) == True
assert appartient(2, l_123) == True
assert appartient(3, l_1) == False
assert appartient(3, l_12) == False
assert appartient(3, l_123) == True
def miroir(liste: ListeChainee) -> ListeChainee:
if taille(liste) <= 1:
return copy(liste)
else:
hd, tl = liste.hd, copy(liste.tl) # O(n)
juste_hd = ListeChainee(hd=hd, tl=None) # O(1)
        return concat(miroir(tl), juste_hd) # O(n^2) + O(n) because of concat
print(miroir(vide)) # [] => []
print(miroir(l_1)) # [1] => [1]
print(miroir(l_12)) # [2, 1] => [1, 2]
print(miroir(l_123)) # [3, 2, 1] => [1, 2, 3]
def alterne(liste1: ListeChainee, liste2: ListeChainee) -> ListeChainee:
if taille(liste1) == 0:
        return copy(liste2) # copy so that nothing is modified
    if taille(liste2) == 0:
        return copy(liste1) # copy so that nothing is modified
h1, t1 = liste1.hd, liste1.tl
h2, t2 = liste2.hd, liste2.tl
return insert(h1, insert(h2, alterne(t1, t2)))
print(alterne(l_1, l_12)) # [1, 2, 1]
print(alterne(l_12, l_1)) # [2, 1, 1]
print(alterne(l_123, l_1)) # [3, 1, 2, 1]
print(alterne(l_123, l_12)) # [3, 2, 2, 1, 1]
print(alterne(l_123, l_123)) # [3, 3, 2, 2, 1, 1]
print(alterne(l_12, l_123)) # [2, 3, 1, 2, 1]
print(alterne(l_1, l_123)) # [1, 3, 2, 1]
def nb_occurrences(x, liste: ListeChainee) -> int:
if liste is None or liste.hd is None:
return 0
else:
count = 1 if x == liste.hd else 0
if liste.tl is None:
return count
else:
return count + nb_occurrences(x, liste.tl)
assert nb_occurrences(1, vide) == 0
assert nb_occurrences(1, l_1) == 1
assert nb_occurrences(1, l_12) == 1
assert nb_occurrences(2, l_12) == 1
assert nb_occurrences(1, l_123) == 1
assert nb_occurrences(2, l_123) == 1
assert nb_occurrences(3, l_123) == 1
assert nb_occurrences(1, concat(l_1, l_1)) == 2
assert nb_occurrences(2, concat(l_1, l_12)) == 1
assert nb_occurrences(3, concat(l_12, l_1)) == 0
assert nb_occurrences(1, concat(l_12, l_12)) == 2
assert nb_occurrences(2, concat(l_12, l_12)) == 2
assert nb_occurrences(1, concat(l_123, concat(l_1, l_1))) == 3
assert nb_occurrences(2, concat(l_123, concat(l_1, l_12))) == 2
assert nb_occurrences(3, concat(l_123, concat(l_12, l_1))) == 1
assert nb_occurrences(3, concat(l_123, concat(l_12, l_12))) == 1
def nb_occurrences(x, liste: ListeChainee, count=0) -> int:
if liste is None or liste.hd is None:
return count
else:
count += 1 if x == liste.hd else 0
if liste.tl is None:
return count
else:
return nb_occurrences(x, liste.tl, count=count)
def filtrer(liste: ListeChainee, predicate) -> ListeChainee:
    if liste is None or liste.hd is None: # list of size 0
        return ListeChainee(hd=None, tl=None)
    elif liste.tl is None: # list of size 1
        if predicate(liste.hd): # we return [hd]
            return ListeChainee(hd=liste.hd, tl=None)
        else: # we return []
            return ListeChainee(hd=None, tl=None)
    else: # list of size >= 2
if predicate(liste.hd):
return insert(liste.hd, filtrer(liste.tl, predicate))
else:
return filtrer(liste.tl, predicate)
def pairs(liste: ListeChainee) -> ListeChainee:
def predicate(x):
return (x % 2) == 0
    # also: predicate = lambda x: (x % 2) == 0
return filtrer(liste, predicate)
def impairs(liste: ListeChainee) -> ListeChainee:
def predicate(x):
return (x % 2) == 1
return filtrer(liste, predicate)
print(pairs(vide)) # []
print(pairs(l_1)) # []
print(pairs(l_12)) # [2]
print(pairs(l_123)) # [2]
print(pairs(insert(4, insert(6, insert(8, l_123))))) # [4, 6, 8, 2]
print(pairs(insert(5, insert(6, insert(8, l_123))))) # [6, 8, 2]
print(impairs(vide)) # []
print(impairs(l_1)) # [1]
print(impairs(l_12)) # [1]
print(impairs(l_123)) # [3, 1]
print(impairs(insert(4, insert(6, insert(8, l_123))))) # [3, 1]
print(impairs(insert(5, insert(6, insert(8, l_123))))) # [5, 3, 1]
def myrange(n: int) -> ListeChainee:
if n <= 0:
return ListeChainee(hd=None, tl=None)
elif n == 1:
return ListeChainee(hd=1, tl=None)
# return insert(1, vide)
else:
return ListeChainee(hd=n, tl=myrange(n-1))
print(myrange(1)) # [1]
print(myrange(2)) # [1, 2]
print(myrange(3)) # [1, 2, 3]
print(myrange(4)) # [1, 2, 3, 4]
def intervale(a: int, b: Optional[int]=None) -> ListeChainee:
if b is None:
a, b = 1, a
n = b - a
if n < 0: # [a..b] = []
return ListeChainee(hd=None, tl=None)
elif n == 0: # [a..b] = [a]
return ListeChainee(hd=a, tl=None)
else: # [a..b] = a :: [a+1..b]
return ListeChainee(hd=a, tl=intervale(a+1, b))
print(intervale(10)) # [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print(intervale(1, 4)) # [1, 2, 3, 4]
print(intervale(13, 13)) # [13]
print(intervale(13, 10)) # []
from typing import Callable
def mymap(fonction: Callable, liste: ListeChainee) -> ListeChainee:
    if liste is None or liste.hd is None: # list of size 0
        return ListeChainee(hd=None, tl=None)
    elif liste.tl is None: # list of size 1
        return ListeChainee(hd=fonction(liste.hd), tl=None)
    else: # list of size >= 2
return ListeChainee(hd=fonction(liste.hd), tl=mymap(fonction, liste.tl))
print(myrange(10))
print(mymap(lambda x: x, myrange(10)))
def intervale_bis(a: int, b: int) -> ListeChainee:
return miroir(mymap(lambda x: x + (a - 1), myrange(b - a + 1)))
print(intervale_bis(1, 10)) # [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print(intervale_bis(1, 4)) # [1, 2, 3, 4]
print(intervale_bis(13, 13)) # [13]
print(intervale_bis(13, 10)) # []
def racine(n: int) -> int:
i = 1
for i in range(n + 1):
if i*i > n:
return i - 1
return i
print(racine(1)) # 1
print(racine(5)) # 2
print(racine(102)) # 10
print(racine(120031)) # 346
def intervale2(a: int, b: Optional[int]=None, pas: int=1) -> ListeChainee:
if b is None:
a, b = 1, a
n = b - a
if n < 0: # [a..b::p] = []
return ListeChainee(hd=None, tl=None)
elif n == 0: # [a..b::p] = [a]
return ListeChainee(hd=a, tl=None)
else: # [a..b::p] = a :: [a+p..b::p]
return ListeChainee(hd=a, tl=intervale2(a + pas, b=b, pas=pas))
print(intervale2(1, 10, 2)) # [1, 3, 5, 7, 9]
print(intervale2(1, 4, 2)) # [1, 3]
print(intervale2(13, 13, 2)) # [13]
print(intervale2(13, 10, 2)) # []
def estDivisible(n: int, k: int) -> bool:
return (n % k) == 0
estDivisible(10, 2)
estDivisible(10, 3)
estDivisible(10, 4)
estDivisible(10, 5)
def estPremier(n : int) -> bool:
    return n >= 2 and taille(filtrer(intervale2(2, racine(n), 1), lambda k: estDivisible(n, k))) == 0
for n in range(2, 20):
print("Petits diviseurs de", n, " -> ", filtrer(intervale2(2, racine(n), 1), lambda k: estDivisible(n, k)))
def premiers(n : int) -> ListeChainee:
return filtrer(intervale2(2, n, 1), estPremier)
premiers(10) # [2, 3, 5, 7]
premiers(100) # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
test = [3, 1, 8, 4, 5, 6, 1, 2]
from typing import TypeVar, List
_a = TypeVar('alpha')
def insere(x : _a, liste : List[_a]) -> List[_a]:
if len(liste) == 0:
return [x]
else:
t, q = liste[0], liste[1:]
if x <= t:
return [x] + liste
else:
return [t] + insere(x, q)
def tri_insertion(liste : List[_a]) -> List[_a]:
if len(liste) == 0:
return []
else:
t, q = liste[0], liste[1:]
return insere(t, tri_insertion(q))
tri_insertion(test)
from typing import TypeVar, List, Callable
_a = TypeVar('alpha')
def insere2(ordre : Callable[[_a, _a], bool], x : _a, liste : List[_a]) -> List[_a]:
if len(liste) == 0:
return [x]
else:
t, q = liste[0], liste[1:]
if ordre(x, t):
return [x] + liste
else:
return [t] + insere2(ordre, x, q)
def tri_insertion2(ordre : Callable[[_a, _a], bool], liste : List[_a]) -> List[_a]:
if len(liste) == 0:
return []
else:
t, q = liste[0], liste[1:]
return insere2(ordre, t, tri_insertion2(ordre, q))
ordre_croissant = lambda x, y: x <= y
tri_insertion2(ordre_croissant, test)
ordre_decroissant = lambda x, y: x >= y
tri_insertion2(ordre_decroissant, test)
from typing import TypeVar, List, Tuple
_a = TypeVar('alpha')
def selectionne_min(liste : List[_a]) -> Tuple[_a, List[_a]]:
if len(liste) == 0:
raise ValueError("Selectionne_min sur liste vide")
else:
def cherche_min(mini : _a, autres : List[_a], reste : List[_a]) -> Tuple[_a, List[_a]]:
if len(reste) == 0:
return (mini, autres)
else:
t, q = reste[0], reste[1:]
if t < mini:
return cherche_min(t, [mini] + autres, q)
else:
return cherche_min(mini, [t] + autres, q)
t, q = liste[0], liste[1:]
return cherche_min(t, [], q)
test
selectionne_min(test)
def tri_selection(liste : List[_a]) -> List[_a]:
if len(liste) == 0:
return []
else:
mini, autres = selectionne_min(liste)
return [mini] + tri_selection(autres)
tri_selection(test)
from typing import TypeVar, List, Tuple
_a = TypeVar('alpha')
def separe(liste : List[_a]) -> Tuple[List[_a], List[_a]]:
if len(liste) == 0:
return ([], [])
elif len(liste) == 1:
return ([liste[0]], [])
else:
x, y, q = liste[0], liste[1], liste[2:]
a, b = separe(q)
return ([x] + a, [y] + b)
test
separe(test)
def fusion(liste1 : List[_a], liste2 : List[_a]) -> List[_a]:
if (len(liste1), len(liste2)) == (0, 0):
return []
elif len(liste1) == 0:
return liste2
elif len(liste2) == 0:
return liste1
else: # les deux sont non vides
x, a = liste1[0], liste1[1:]
y, b = liste2[0], liste2[1:]
if x <= y:
return [x] + fusion(a, [y] + b)
else:
return [y] + fusion([x] + a, b)
fusion([1, 3, 7], [2, 3, 8])
def tri_fusion(liste : List[_a]) -> List[_a]:
if len(liste) <= 1:
return liste
else:
a, b = separe(liste)
return fusion(tri_fusion(a), tri_fusion(b))
tri_fusion(test)
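# Quick correctness check (an illustrative addition): all three sorts should
# agree with the built-in sorted() on random inputs.
import random
for _ in range(20):
    liste = [random.randint(-50, 50) for _ in range(30)]
    assert tri_insertion(liste) == tri_selection(liste) == tri_fusion(liste) == sorted(liste)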
%timeit tri_insertion(test)
%timeit tri_selection(test)
%timeit tri_fusion(test)
from sys import setrecursionlimit
setrecursionlimit(100000)
# needed to test the various recursive functions on large lists
import random
def test_random(n : int) -> List[int]:
return [random.randint(-1000, 1000) for _ in range(n)]
for n in [10, 100, 1000]:
print("\nFor n =", n)
for tri in [tri_insertion, tri_selection, tri_fusion]:
print(" and tri = {}".format(tri.__name__))
%timeit tri(test_random(n))
from typing import TypeVar, List, Callable
_a, _b = TypeVar('_a'), TypeVar('_b')
def applique(f : Callable[[_a], _b], liste : List[_a]) -> List[_b]:
    # Cheat (note: only this first return ever runs; the variants below are kept for reference):
    return list(map(f, liste))
    # 1st approach:
    return [f(x) for x in liste]
    # 2nd approach:
    fliste = []
    for x in liste:
        fliste.append(f(x))
    return fliste
    # 3rd approach
n = len(liste)
if n == 0: return []
fliste = [liste[0] for _ in range(n)]
for i in range(n):
fliste[i] = f(liste[i])
return fliste
def premiers_carres_parfaits(n : int) -> List[int]:
return applique(lambda x : x * x, list(range(1, n + 1)))
premiers_carres_parfaits(12)
from typing import TypeVar, List, Callable
_a = TypeVar('_a')
def itere(f : Callable[[_a], None], liste : List[_a]) -> None:
for x in liste:
f(x)
print_int = lambda i: print("{}".format(i))
def affiche_liste_entiers(liste : List[int]) -> None:
print("Debut")
itere(print_int, liste)
print("Fin")
affiche_liste_entiers([1, 2, 4, 5, 12011993])
from typing import TypeVar, List, Callable
_a = TypeVar('_a')
# Like all(map(f, liste))
def qqsoit(f : Callable[[_a], bool], liste : List[_a]) -> bool:
    for x in liste:
        if not f(x): return False # early exit
    return True
# Like any(map(f, liste))
def ilexiste(f : Callable[[_a], bool], liste : List[_a]) -> bool:
    for x in liste:
        if f(x): return True # early exit
    return False
qqsoit(lambda x: (x % 2) == 0, [1, 2, 3, 4, 5])
ilexiste(lambda x: (x % 2) == 0, [1, 2, 3, 4, 5])
%timeit qqsoit(lambda x: (x % 2) == 0, [1, 2, 3, 4, 5])
%timeit all(map(lambda x: (x % 2) == 0, [1, 2, 3, 4, 5]))
%timeit ilexiste(lambda x: (x % 2) == 0, [1, 2, 3, 4, 5])
%timeit any(map(lambda x: (x % 2) == 0, [1, 2, 3, 4, 5]))
def appartient_curry(x : _a) -> Callable[[List[_a]], bool]:
return lambda liste: ilexiste(lambda y: x == y, liste)
def appartient(x : _a, liste : List[_a]) -> bool:
return ilexiste(lambda y: x == y, liste)
def toutes_egales(x : _a, liste : List[_a]) -> bool:
return qqsoit(lambda y: x == y, liste)
appartient_curry(1)([1, 2, 3])
appartient(1, [1, 2, 3])
appartient(5, [1, 2, 3])
toutes_egales(1, [1, 2, 3])
toutes_egales(5, [1, 2, 3])
%timeit appartient(random.randint(-10, 10), [random.randint(-1000, 1000) for _ in range(1000)])
%timeit random.randint(-10, 10) in [random.randint(-1000, 1000) for _ in range(1000)]
from typing import TypeVar, List, Callable
_a = TypeVar('_a')
# Like list(filter(f, liste))
def filtre(f : Callable[[_a], bool], liste : List[_a]) -> List[_a]:
# return [x for x in liste if f(x)]
liste2 = []
for x in liste:
if f(x):
liste2.append(x)
return liste2
filtre(lambda x: (x % 2) == 0, [1, 2, 3, 4, 5])
filtre(lambda x: (x % 2) != 0, [1, 2, 3, 4, 5])
pairs = lambda liste: filtre(lambda x: (x % 2) == 0, liste)
impairs = lambda liste: filtre(lambda x: (x % 2) != 0, liste)
pairs(list(range(10)))
impairs(list(range(10)))
from typing import TypeVar, List, Callable
_a, _b = TypeVar('_a'), TypeVar('_b')
# Like functools.reduce(f, liste, acc): a left fold over the list
def reduit_rec(f : Callable[[_a, _b], _a], acc : _a, liste : List[_b]) -> _a:
    if len(liste) == 0:
        return acc
    else:
        h, q = liste[0], liste[1:]
        return reduit_rec(f, f(acc, h), q)
# Non-recursive version, much more efficient
def reduit(f : Callable[[_a, _b], _a], acc : _a, liste : List[_b]) -> _a:
acc_value = acc
for x in liste:
acc_value = f(acc_value, x)
return acc_value
from operator import add
somme_rec = lambda liste: reduit_rec(add, 0, liste)
somme = lambda liste: reduit(add, 0, liste)
somme_rec(list(range(10)))
somme(list(range(10)))
sum(list(range(10)))
%timeit somme_rec(list(range(10)))
%timeit somme(list(range(10)))
%timeit sum(list(range(10)))
%timeit somme_rec(list(range(1000)))
%timeit somme(list(range(1000)))
%timeit sum(list(range(1000)))
from operator import mul
produit = lambda liste: reduit(mul, 1, liste)
produit(list(range(1, 6))) # 5! = 120
def factorielle(n : int) -> int:
return produit(range(1, n + 1))
for n in range(1, 15):
print("{:>7}! = {:>13}".format(n, factorielle(n)))
miroir = lambda liste: reduit(lambda l, x : [x] + l, [], liste)
miroir([2, 3, 5, 7, 11])
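# The standard library provides the same left fold (illustrative comparison):
# functools.reduce(f, liste, acc) behaves exactly like reduit(f, acc, liste).
from functools import reduce
assert reduce(add, range(10), 0) == somme(list(range(10)))
assert reduce(mul, range(1, 6), 1) == produit(list(range(1, 6)))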
from typing import Dict, Optional, Tuple
# A type cannot be defined recursively here, unlike in Caml
arbre_bin = Dict[str, Optional[Tuple[Dict, Dict]]]
from pprint import pprint
arbre_test = {'Noeud': (
{'Noeud': (
{'Noeud': (
{'Feuille': None},
{'Feuille': None}
)},
{'Feuille': None}
)},
{'Feuille': None}
)}
pprint(arbre_test)
Feuille = {'Feuille': None}
Noeud = lambda x, y : {'Noeud': (x, y)}
arbre_test = Noeud(Noeud(Noeud(Feuille, Feuille), Feuille), Feuille)
pprint(arbre_test)
def taille(a : arbre_bin) -> int:
    # Pattern matching ~= if, elif,.. on the depth-1 keys
    # of the dictionary (a single key)
if 'Feuille' in a:
return 1
elif 'Noeud' in a:
x, y = a['Noeud']
return 1 + taille(x) + taille(y)
taille(arbre_test) # 7
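# Illustrative extra: in such a strict binary tree the number of leaves and
# the total number of nodes are linked (total = 2 * leaves - 1), which we can
# verify on the example.
def nb_feuilles(a : arbre_bin) -> int:
    if 'Feuille' in a:
        return 1
    g, d = a['Noeud']
    return nb_feuilles(g) + nb_feuilles(d)
assert 2 * nb_feuilles(arbre_test) - 1 == taille(arbre_test)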
def hauteur(a : arbre_bin) -> int:
if 'Feuille' in a:
return 0
elif 'Noeud' in a:
x, y = a['Noeud']
return 1 + max(hauteur(x), hauteur(y))
hauteur(arbre_test) # 3
from typing import TypeVar, Union, List
F = TypeVar('F')
N = TypeVar('N')
element_parcours = Union[F, N]
parcours = List[element_parcours]
def parcours_prefixe(a : arbre_bin) -> parcours:
if 'Feuille' in a:
return [F]
elif 'Noeud' in a:
g, d = a['Noeud']
return [N] + parcours_prefixe(g) + parcours_prefixe(d)
parcours_prefixe(arbre_test)
def parcours_postfixe(a : arbre_bin) -> parcours:
if 'Feuille' in a:
return [F]
elif 'Noeud' in a:
g, d = a['Noeud']
return parcours_postfixe(g) + parcours_postfixe(d) + [N]
parcours_postfixe(arbre_test)
def parcours_infixe(a : arbre_bin) -> parcours:
if 'Feuille' in a:
return [F]
elif 'Noeud' in a:
g, d = a['Noeud']
return parcours_infixe(g) + [N] + parcours_infixe(d)
parcours_infixe(arbre_test)
def parcours_prefixe2(a : arbre_bin) -> parcours:
def parcours(vus, b):
if 'Feuille' in b:
vus.insert(0, F)
return vus
elif 'Noeud' in b:
vus.insert(0, N)
g, d = b['Noeud']
return parcours(parcours(vus, g), d)
p = parcours([], a)
return p[::-1]
parcours_prefixe2(arbre_test)
def parcours_postfixe2(a : arbre_bin) -> parcours:
def parcours(vus, b):
if 'Feuille' in b:
vus.insert(0, F)
return vus
elif 'Noeud' in b:
g, d = b['Noeud']
p = parcours(parcours(vus, g), d)
p.insert(0, N)
return p
p = parcours([], a)
return p[::-1]
parcours_postfixe2(arbre_test)
def parcours_infixe2(a : arbre_bin) -> parcours:
def parcours(vus, b):
if 'Feuille' in b:
vus.insert(0, F)
return vus
elif 'Noeud' in b:
g, d = b['Noeud']
p = parcours(vus, g)
p.insert(0, N)
return parcours(p, d)
p = parcours([], a)
return p[::-1]
parcours_infixe2(arbre_test)
from collections import deque
def parcours_largeur(a : arbre_bin) -> parcours:
file = deque()
    # function with a side effect on the queue
def vasy() -> parcours:
if len(file) == 0:
return []
else:
b = file.pop()
if 'Feuille' in b:
# return [F] + vasy()
v = vasy()
v.insert(0, F)
return v
elif 'Noeud' in b:
g, d = b['Noeud']
file.insert(0, g)
file.insert(0, d)
# return [N] + vasy()
v = vasy()
v.insert(0, N)
return v
file.insert(0, a)
return vasy()
parcours_largeur(arbre_test)
def parcours_profondeur(a : arbre_bin) -> parcours:
pile = []
    # function with a side effect on the stack
def vasy() -> parcours:
if len(pile) == 0:
return []
else:
b = pile.pop()
if 'Feuille' in b:
# return [F] + vasy()
v = vasy()
v.append(F)
return v
elif 'Noeud' in b:
g, d = b['Noeud']
pile.append(g)
pile.append(d)
# return [N] + vasy()
v = vasy()
v.insert(0, N)
return v
pile.append(a)
return vasy()
parcours_profondeur(arbre_test)
test_prefixe = parcours_prefixe2(arbre_test)
test_prefixe
from typing import Tuple
def reconstruit_prefixe(par : parcours) -> arbre_bin:
def reconstruit(p : parcours) -> Tuple[arbre_bin, parcours]:
if len(p) == 0:
raise ValueError("parcours invalide pour reconstruit_prefixe")
elif p[0] == F:
return (Feuille, p[1:])
elif p[0] == N:
g, q = reconstruit(p[1:])
d, r = reconstruit(q)
return (Noeud(g, d), r)
# call it
a, p = reconstruit(par)
if len(p) == 0:
return a
else:
raise ValueError("parcours invalide pour reconstruit_prefixe")
reconstruit_prefixe([F])
reconstruit_prefixe(test_prefixe)
reconstruit_prefixe([N, F, F] + test_prefixe) # fails
largeur_test = parcours_largeur(arbre_test)
largeur_test
from collections import deque
def reconstruit_largeur(par : parcours) -> arbre_bin:
file = deque()
    # Function with side effects
    def lire_element(e : element_parcours) -> None:
        if e == F:
            file.append(Feuille)
        elif e == N:
            d = file.popleft()
            g = file.popleft() # mind the order!
            file.append(Noeud(g, d))
    # Apply this function to every element of the traversal
for e in reversed(par):
lire_element(e)
if len(file) == 1:
return file.popleft()
else:
raise ValueError("parcours invalide pour reconstruit_largeur")
largeur_test
reconstruit_largeur(largeur_test)
arbre_test
from collections import deque
def reconstruit_prefixe2(par : parcours) -> arbre_bin:
pile = deque()
    # Function with side effects
    def lire_element(e : element_parcours) -> None:
        if e == F:
            pile.append(Feuille)
        elif e == N:
            g = pile.pop()
            d = pile.pop() # mind the order!
            pile.append(Noeud(g, d))
    # Apply this function to every element of the traversal
for e in reversed(par):
lire_element(e)
if len(pile) == 1:
return pile.pop()
else:
raise ValueError("parcours invalide pour reconstruit_prefixe2")
prefixe_test = parcours_prefixe2(arbre_test)
prefixe_test
reconstruit_prefixe2(prefixe_test)
arbre_test
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Script settings
Step2: We create one big dataframe, the columns are the sensors of type electricity
Step3: Convert Datetimeindex to local time
Step5: We define two low-level functions
Step6: Data handling
Step7: Plots
|
<ASSISTANT_TASK:>
Python Code:
import os, sys
import inspect
import numpy as np
import datetime as dt
import time
import pytz
import pandas as pd
import pdb
script_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# add the path to opengrid to sys.path
sys.path.append(os.path.join(script_dir, os.pardir, os.pardir))
from opengrid.library import config
c=config.Config()
DEV = c.get('env', 'type') == 'dev' # DEV is True if we are in development environment, False if on the droplet
if not DEV:
# production environment: don't try to display plots
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.dates import HourLocator, DateFormatter, AutoDateLocator
# find tmpo
sys.path.append(c.get('tmpo', 'folder'))
from opengrid.library.houseprint import houseprint
if DEV:
if c.get('env', 'plots') == 'inline':
%matplotlib inline
else:
%matplotlib qt
else:
pass # don't try to render plots
plt.rcParams['figure.figsize'] = 12,8
BXL = pytz.timezone('Europe/Brussels')
number_of_days = 7
hp = houseprint.load_houseprint_from_file('new_houseprint.pkl')
hp.init_tmpo()
start = pd.Timestamp(time.time() - number_of_days*86400, unit='s')
df = hp.get_data(sensortype='electricity', head=start, resample='s')
df = df.resample(rule='60s', how='max')
df = df.diff()*3600/60
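# Note: the diff of the cumulative counter per 60 s bin, scaled by 3600/60,
# gives average power in W -- assuming the counter unit is Wh.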
df.index = df.index.tz_convert(BXL)
# plot a few dataframes to inspect them
if DEV:
for sensor in df.columns:
plt.figure()
df[sensor].plot()
def testvalid(row):
return row['maxima'] > 0 and row['maxima'] != row['minima']
def get_minima(sensor):
"""Return the standby consumption for the covered days for a given sensor as an array.
Take care of days where this sensor has NO VALID standby consumption."""
global minima
res = np.ndarray(len(minima))
for i,df in enumerate(minima):
try:
res[i] = df[sensor]
except:
res[i] = np.nan
return res
index_slices = [] # will contain the correct index slices for each of the analysed nights
minima = [] # each element in minima is a dataframe with standby consumption per valid sensor
valid_sensors = set() # we keep track of all sensors that yield a valid standby consumption for at least one day.
# find the date for which we still have the full night (between 01:00 and 05:00). We will store it as datetime at 00:00 (local time)
hour = df.index[-1].hour # the hour of the last index.
if hour >= 5:
last_day = df.index[-1].date()
else:
last_day = (df.index[-1] - dt.timedelta(days=1)).date()
for day in range(number_of_days)[::-1]:
#pdb.set_trace()
dt_start = dt.datetime.combine(last_day - dt.timedelta(days=day), dt.time(0,0)) # start slicing at 01:00 local time
dt_stop = dt.datetime.combine(last_day - dt.timedelta(days=day), dt.time(5,0)) # stop slicing at 05:00 local time
df_night = df.ix[dt_start:dt_stop] # contains only data for a single night
index_slices.append(df_night.index.copy())
df_results = pd.DataFrame(index=df.columns) #df_results contains the results of the analysis for a single night. Index = sensorid
df_results['minima'] = df_night.min(axis=0)
df_results['maxima'] = df_night.max(axis=0)
df_results['valid'] = df_results.apply(testvalid, axis=1)
minima.append(df_results['minima'].ix[df_results.valid])
valid_sensors.update(set(minima[-1].index.tolist()))
index_slices_days = [x[0] for x in index_slices[1:]]
index = pd.DatetimeIndex(freq='D', start=index_slices_days[0], periods=number_of_days)
df_=pd.concat(minima, axis=1)
df_.columns = index
df_statistics = df_.describe().T
df_statistics
for sensor in list(valid_sensors)[:]:
plt.figure(figsize=(10,8))
ax1=plt.subplot(211)
ax1.plot_date(df_statistics.index, df_statistics[u'25%'], '-', lw=2, color='g', label=u'25%')
ax1.plot_date(df_statistics.index, df_statistics[u'50%'], '-', lw=2, color='orange', label=u'50%')
ax1.plot_date(df_statistics.index, df_statistics[u'75%'], '-', lw=2, color='r', label=u'75%')
ax1.plot_date(df_.T.index, df_.T[sensor], 'rD', ms=7)
xticks = [x.strftime(format='%d/%m') for x in df_statistics.index]
locs, lables=plt.xticks()
plt.xticks(locs, xticks, rotation='vertical')
plt.title(hp.find_sensor(sensor).device.key + ' - ' + sensor)
ax1.grid()
ax1.set_ylabel('Watt')
ax2=plt.subplot(212)
try:
ax2.plot(index_slices[-1], df.ix[index_slices[-1]][sensor], 'b-', label='Afgelopen nacht')
ax2.xaxis_date(BXL) #Put timeseries plot in local time
# rotate the labels
plt.xticks(rotation='vertical')
plt.legend()
ax2.set_ylabel('Watt')
except:
print "Could not create graph for {}".format(hp.find_sensor(sensor).device.key)
else:
plt.savefig(os.path.join(c.get('data', 'folder'), 'figures', 'standby_vertical_'+sensor+'.png'), dpi=100)
if not DEV:
plt.close()
try:
valid_sensors.remove('565de0a7dc64d8370aa321491217b85f') # the FLM of 3E does not fit in household standby benchmark
except:
pass
for sensor in valid_sensors:
plt.figure(figsize=(10,5))
ax1=plt.subplot(121)
box = [x.values for x in minima]
ax1.boxplot(box, positions=range(len(df_statistics)), notch=False)
ax1.plot(range(len(df_statistics)), get_minima(sensor), 'rD', ms=10, label='Sluipverbruik')
xticks = [x[0].strftime(format='%d/%m') for x in index_slices]
plt.xticks(range(len(df_statistics)), xticks, rotation='vertical')
#plt.title(hp.get_flukso_from_sensor(sensor) + ' - ' + sensor)
ax1.grid()
ax1.set_ylabel('Watt')
plt.legend(numpoints=1, frameon=False)
#ax1.set_xticklabels([t.strftime(format='%d/%m') for t in df_all_perday.index.tolist()])
ax2=plt.subplot(122)
try:
ax2.plot(index_slices[-1], df.ix[index_slices[-1]][sensor], 'b-', label='Afgelopen nacht')
ax2.xaxis_date(BXL) #Put timeseries plot in local time
# rotate the labels
plt.xticks(rotation='vertical')
ax2.set_ylabel('Watt')
ax2.grid()
plt.legend(loc='upper right', frameon=False)
plt.tight_layout()
except Exception as e:
print(e)
else:
plt.savefig(os.path.join(c.get('data', 'folder'), 'figures', 'standby_horizontal_'+sensor+'.png'), dpi=100)
if not DEV:
plt.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Helper methods
Step2: We also need some helper methods to evaluate the success of our strategy
Step3: Implementing the actual strategy
Step4: Performing the experiment
Step5: Let us run the experiment $n_{trials}$ times. Since we return values $1$ and $0$ for success and failure, respectively, we can find a good approximation of the success probability by computing the mean.
Step6: It seems strategy 2 from above is correct! The probability is significantly higher than $0.5$ and switching seems like a good choice. How is that possible?
Step7: Dependence on the rate parameter
Step8: Now let's create a couple of rates and plot the final result
Step9: We see that this curve is pretty noisy. To smoothen the noise, we can create a bunch of curves and average them. Alternatively we can create a bunch of success rates for every threshold rate and average them.
Step10: Let's plot the results!
Step11: We see that after the noise reduction we still have a pronounced feature. To fully appreciate that, let's see what the deviation from the mean is for an eyeballed optimal rate of $\lambda = 2\cdot 10^{-5}$
Step12: We win! Even within a standard deviation the success rate is significant.
Step13: We can clearly see that with increasing spread of the distribution the maximum moves towards smaller rates. In fact, you can roughly say that the maximum occurs at a rate close to the inverse of the upper boundary. This makes sense since the expectation value $E(T) = 1/\lambda$, and thus an optimal threshold choice is given by the inverse of the upper limit of the money distribution.
Step14: Our advantage is gone! The fact that we do know something about the money distribution mysteriously decorrelates the envelopes and renders our strategy obsolete. We might as well stick with any envelope independent of the amount of money we find in it. However, this might not be a bad trade, since we now have a rough idea of what is in either envelope before we even open it.
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pymc as pm
from numpy.random import choice
%matplotlib inline
matplotlib.style.use('ggplot')
matplotlib.rc_params_from_file("../styles/matplotlibrc" ).update()
def switch_envelope(chosen_envelope):
if chosen_envelope == 'a':
return 'b'
else:
return 'a'
'''Find out if the final envelope _actually_ contains the highest value'''
def isLargerAmount(chosen_envelope, envelope_contents):
inverted_contents = create_inverted_envelope(envelope_contents)
if chosen_envelope == inverted_contents.get(np.max(inverted_contents.keys())):
return 1 # success
else:
return 0 # failure
'''We need an inverse lookup table to associate the highest value with an envelope'''
def create_inverted_envelope(envelope_contents):
dct = {}
for key in envelope_contents.keys():
dct[envelope_contents.get(key)] = key
return dct
def singleExperiment(money_distribution, threshold_distribution):
# create two identical envelopes with a random amount of money
envelope_contents = {'a': money_distribution.random().item(),
'b': money_distribution.random().item()}
#choose an envelope
chosen_envelope = choice(['a','b'])
#check for the amount and switch if necessary
if (threshold_distribution.random().item() >= envelope_contents[chosen_envelope]):
chosen_envelope = switch_envelope(chosen_envelope)
#evaluate whether strategy was a success
return isLargerAmount(chosen_envelope, envelope_contents)
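# Intuition (informal): whenever the random threshold T lands between the two
# amounts, the switch rule is guaranteed to end on the larger envelope; those
# cases push the overall success probability strictly above 1/2.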
money = pm.DiscreteUniform('money', 100, 100000)
threshold = pm.Exponential("threshold", 0.00005)
def run_n_experiments(n_trials, money, threshold):
lst = []
for idx in range(n_trials):
lst.append(singleExperiment(money, threshold))
return np.mean(lst)
print 'The success probability is approximately p = %0.3f' % run_n_experiments(100, money, threshold)
money = pm.DiscreteUniform('money', 100, 100000)
def createSuccessValuesFrom(rates):
success_values = []
for rate in rates:
threshold = pm.Exponential("threshold", rate)
success_values.append(run_n_experiments(1000, money, threshold))
return success_values
array_of_rates = np.logspace(-8, 0, num=100)
plt.semilogx(array_of_rates, createSuccessValuesFrom(array_of_rates))
def averageSuccessRate(threshold, number_of_repetitions):
trial = 0
lst = []
while trial < number_of_repetitions:
lst.append(run_n_experiments(100, money, threshold))
trial += 1
return np.mean(lst), np.std(lst)
def createSmoothSuccessValuesFrom(rates, number_of_repetitions):
success_values = []
stddev = []
for rate in rates:
threshold = pm.Exponential("threshold", rate)
success_values.append(averageSuccessRate(threshold, number_of_repetitions)[0])
stddev.append(averageSuccessRate(threshold, number_of_repetitions)[1])
return success_values, stddev
array_of_rates = np.logspace(-8, 0, num=50)
smoothened_rates = createSmoothSuccessValuesFrom(array_of_rates, 25)
plt.semilogx(array_of_rates, smoothened_rates[0])
threshold = pm.Exponential('threshold', 0.00002)
print 'The success probability is approximately p = %0.3f +/- %0.3f ' % averageSuccessRate(threshold, 100)
def createSuccessValuesWithMoneyRangeFrom(rates, money):
success_values = []
for rate in rates:
threshold = pm.Exponential("threshold", rate)
success_values.append(run_n_experiments(1000, money, threshold))
return success_values
money_1 = pm.DiscreteUniform('money', 100, np.power(10, 3))
money_2 = pm.DiscreteUniform('money', 100, np.power(10, 6))
money_3 = pm.DiscreteUniform('money', 100000, np.power(10, 9))
array_of_rates = np.logspace(-10, 0, num=100)
plt.semilogx(array_of_rates, createSuccessValuesWithMoneyRangeFrom(array_of_rates, money_1),
array_of_rates, createSuccessValuesWithMoneyRangeFrom(array_of_rates, money_2),
array_of_rates, createSuccessValuesWithMoneyRangeFrom(array_of_rates, money_3))
money_1 = pm.Normal('money', np.power(10,5), 100)
money_2 = pm.Normal('money', np.power(10,5), 500)
money_3 = pm.Normal('money', np.power(10,5), 10000)
array_of_rates = np.logspace(-8, 0, num=100)
plt.semilogx(array_of_rates, createSuccessValuesWithMoneyRangeFrom(array_of_rates, money_1),
array_of_rates, createSuccessValuesWithMoneyRangeFrom(array_of_rates, money_2),
array_of_rates, createSuccessValuesWithMoneyRangeFrom(array_of_rates, money_3))
from IPython.core.display import HTML
def css_styling():
styles = open("../styles/custom.css", "r").read()
return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 1
Step2: The variable g (quarterly GDP growth expressed as an annual rate) is now what Python calls a DataFrame, which is a collection of data organized by variable and observation. You can get some of its properties by typing some or all of the following in the box below
Step3: Example 2
Step4: Answer(s)? Aren't the boxplots in the last figure cool? The histograms above them? What do you see in them? How do the various returns compare?
Step5: Example 4
Step6: Note to self.
|
<ASSISTANT_TASK:>
Python Code:
x = [7, 3, 5]
x.pop?
# anything after the hashtag is a comment
# load packages
import datetime as dt
import pandas.io.data as web # data import tools
import matplotlib.pyplot as plt # plotting tools
# The next one is an IPython command: it says to put plots here in the notebook, rather than open a separate window.
%matplotlib inline
# get data from FRED
fred_series = ["GDPC1"]
start_date = dt.datetime(1960, 1, 1)
data = web.DataReader(fred_series, "fred", start_date)
# print last 3 data points to see what we've got
print(data.tail(3))
# compute annual growth rates
g = 4*data.pct_change()
# change label
g.columns = ['US GDP Growth']
# enter your commands here
# more examples: some statistics on GDP growth
print(['Mean GDP growth ', g.mean()])
print(['Std deviation ', g.std()])
# do this for subperiods...
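# a minimal sketch of the suggested subperiod comparison (the 1985 cutoff,
# often associated with the "Great Moderation", is an assumption):
print(['Mean growth pre-1985 ', g[:'1984'].mean()])
print(['Mean growth post-1985', g['1985':].mean()])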
# quick and dirty plot
# note the financial crisis: GDP fell 8% one quarter (at an annual rate, so really 2%)
g.plot()
plt.show()
# more complex plot, bar chart for last 6 quarters
# also: add moving average?
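# a sketch of the plot suggested above (the 4-quarter moving-average window is
# an assumption; on older pandas use pd.rolling_mean(g, 4) instead of .rolling())
g.tail(6).plot(kind='bar')
g.rolling(window=4).mean().plot()
plt.show()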
# load packages (if it's redundant it'll be ignored)
import pandas.io.data as web
# read data from Ken French's website
ff = web.DataReader('F-F_Research_Data_Factors', 'famafrench')[0]
# NB: ff.xs is a conflict, rename to xsm
ff.columns = ['xsm', 'smb', 'hml', 'rf']
# see what we've got
print(ff.head(3))
print(ff.describe())
# compute and print summary stats
moments = [ff.mean(), ff.std(), ff.skew(), ff.kurtosis() - 3]
# \n here is a line break
print('Summary stats for Fama-French factors (mean, std, skew, ex kurt)') #, end='\n\n')
print(moments)
#[print(moment, end='\n\n') for moment in moments]
# try some things yourself
# like what? type ff.[tab]
import pandas as pd
pd.__version__
# some plots
ff.plot()
plt.show()
ff.hist(bins=50, sharex=True)
plt.show()
ff.boxplot(whis=0, return_type='axes')
plt.show()
# load package under name wb
from pandas.io import wb
# find the codes for the variables of interest
wb.search?
wb.search(string='gdp.*capita').iloc[:2]
# specify dates, variables, and countries
start = 2011
# GDP per capita, population, life expectancy
variable_list = ['NY.GDP.PCAP.KD', 'SP.POP.TOTL', 'SP.DYN.LE00.IN']
country_list = ['US', 'FR', 'JP', 'CN', 'IN', 'BR', 'MX']
# Python understands we need to go to the second line because ( hasn't been closed by )
data = wb.download(indicator=variable_list,
country=country_list, start=start, end=start).dropna()
# see what we've got
print(data)
# check the column labels, change to something simpler
print(data.columns)
data.columns = ['gdppc', 'pop', 'le']
print(data)
# scatterplot
# life expectancy v GDP per capita
# size of circles controlled by population
# load packages (ignored if redundant)
import numpy as np
import matplotlib.pyplot as plt
plt.scatter(data['gdppc'], data['le'], s=0.000001*data['pop'], alpha=0.5)
plt.ylabel('Life Expectancy')
plt.xlabel('GDP Per Capita')
plt.show()
# Note: size of circles based on population
# load packages
import pandas as pd
import pandas.io.data as web
from pandas.io.data import Options
import datetime as dt
import matplotlib.pylab as plt
# ticker
ticker = 'spy'
# load stock price first (the underlying)
# pick a recent date and subtract seven days to be sure we get a quote
# http://pymotw.com/2/datetime/#date-arithmetic
today = dt.date.today()
one_week = dt.timedelta(days=7)
start = today - one_week
stock = web.DataReader(ticker, 'yahoo', start)
print(stock) # just to see what we have
# take the last close (-1 is the last, 'Close' is the close)
# this shows up in our figure
atm = stock.ix[-1,'Close'] # the -1 takes the last observation
# get option prices for same ticker
option = Options(ticker, 'yahoo')
expiry = dt.date(2014, 11, 20)
data_calls = option.get_call_data(expiry=expiry).dropna()
data_puts = option.get_put_data(expiry=expiry).dropna()
# check what we have
print(data_calls.index)
print(data_calls.tail())
# compute mid of bid and ask and arrange series for plotting
calls_bid = data_calls['Bid']
calls_ask = data_calls['Ask']
calls_strikes = data_calls['Strike']
calls_mid = (data_calls['Bid'] + data_calls['Ask'])/2
puts_strikes = data_puts['Strike']
puts_mid = (data_puts['Bid'] + data_puts['Ask'])/2
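# optional sanity check via put-call parity (a rough sketch: it ignores
# discounting and dividends, so only approximate agreement is expected)
parity = pd.merge(pd.DataFrame({'Strike': calls_strikes, 'call': calls_mid}),
pd.DataFrame({'Strike': puts_strikes, 'put': puts_mid}), on='Strike')
print((parity['call'] - parity['put'] - (atm - parity['Strike'])).abs().describe())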
# plot call and put prices v strike
plt.plot(calls_strikes, calls_mid, 'r', lw=2, label='calls')
plt.plot(puts_strikes, puts_mid, 'b', lw=2, label='puts')
# prettify it
#plt.axis([120, 250, 0, 50])
plt.axvline(x=atm, color='k', linestyle='--', label='ATM')
plt.legend(loc='best')
plt.show()
# rerun the figure above with different color lines. Or dashed lines for call and put prices.
# or change the form of the vertical ATM line: solid? another color?
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k and the test set 19000 labelled examples. Given these sizes, it should be possible to train models quickly on any machine.
Step3: Extract the dataset from the compressed .tar.gz file.
Step4: Problem 1
Step5: Problem 2
Step6: Problem 3
Step7: Finally, let's save the data for later reuse
|
<ASSISTANT_TASK:>
Python Code:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
import matplotlib.pyplot as plt
import numpy as np
import os
import tarfile
from urllib.request import urlretrieve
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
import pickle
url = 'http://yaroslavvb.com/upload/notMNIST/'
def maybe_download(filename, expected_bytes):
Download a file if not present, and make sure it's the right size.
if not os.path.exists(filename):
filename, _ = urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print ('Found and verified', filename)
else:
raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
num_classes = 10
def extract(filename):
tar = tarfile.open(filename)
tar.extractall()
tar.close()
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
data_folders = [os.path.join(root, d) for d in sorted(os.listdir(root))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print (data_folders)
return data_folders
train_folders = extract(train_filename)
test_folders = extract(test_filename)
image_size = 28 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load(data_folders, min_num_images, max_num_images):
dataset = np.ndarray(
shape=(max_num_images, image_size, image_size), dtype=np.float32)
labels = np.ndarray(shape=(max_num_images), dtype=np.int32)
label_index = 0
image_index = 0
for folder in data_folders:
print(folder)
for image in os.listdir(folder):
if image_index >= max_num_images:
raise Exception('More images than expected: %d >= %d' % (
image_index, max_num_images))
image_file = os.path.join(folder, image)
try:
image_data = (ndimage.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[image_index, :, :] = image_data
labels[image_index] = label_index
image_index += 1
except IOError as e:
print('Could not read:', image_file, ':', e, "- it's ok, skipping.")
label_index += 1
num_images = image_index
dataset = dataset[0:num_images, :, :]
labels = labels[0:num_images]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' % (
num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
print('Labels:', labels.shape)
return dataset, labels
train_dataset, train_labels = load(train_folders, 450000, 550000)
test_dataset, test_labels = load(test_folders, 18000, 20000)
np.random.seed(133)
def randomize(dataset, labels):
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation,:,:]
shuffled_labels = labels[permutation]
return shuffled_dataset, shuffled_labels
train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
train_size = 200000
valid_size = 10000
valid_dataset = train_dataset[:valid_size,:,:]
valid_labels = train_labels[:valid_size]
train_dataset = train_dataset[valid_size:valid_size+train_size,:,:]
train_labels = train_labels[valid_size:valid_size+train_size]
print('Training', train_dataset.shape, train_labels.shape)
print('Validation', valid_dataset.shape, valid_labels.shape)
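# quick balance check (an extra illustration): classes should appear in
# roughly equal numbers after shuffling and splitting
print('Training label counts:', np.bincount(train_labels))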
pickle_file = 'notMNIST.pickle'
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Builtin method to highlight clades
Step2: Or, use toyplot directly
Step3: More examples
|
<ASSISTANT_TASK:>
Python Code:
import toytree
import toyplot
# generate a random tree
tre = toytree.rtree.unittree(ntips=10, treeheight=100, seed=123)
# draw tree on canvas
canvas, axes, mark = tre.draw(ts='c', layout='r', tip_labels=True);
# get annotator tool
anno = toytree.utils.Annotator(tre, axes, mark)
# annotate clade by selecting names
anno.draw_clade_box(
names=['r0', 'r5'],
style={
"fill": 'red',
"fill-opacity": 0.15,
"stroke-width": 2,
"stroke": 'red',
"stroke-opacity": 0.3,
},
);
# draw tree on canvas
canvas, axes, mark = tre.draw(ts='o', layout='r', tip_labels=False);
# draw rectangles next to two clades
axes.rectangle(20, 40, -0.45, 3.45, color=toytree.colors[1], opacity=0.5)
axes.rectangle(20, 40, 3.55, 5.45, color=toytree.colors[2], opacity=0.5)
axes.rectangle(20, 40, 5.55, 9.45, color=toytree.colors[3], opacity=0.5)
axes.text(50, 1.5, "clade A", style={"text-anchor": "start", "fill": toytree.colors[1]})
axes.text(50, 4.5, "clade B", style={"text-anchor": "start", "fill": toytree.colors[2]})
axes.text(50, 7.5, "clade C", style={"text-anchor": "start", "fill": toytree.colors[3]});
import numpy as np
import string
tre = toytree.rtree.unittree(ntips=10, treeheight=100, seed=123)
tre = tre.set_node_values(
"name",
{i: str(i) + string.ascii_letters[:np.random.randint(5, 15)] for i in range(10)}
)
color = toytree.colors[1]
# draw tree on canvas
canvas, axes, mark = tre.draw(ts='c', layout='r', tip_labels=True);
# get annotator tool
anno = toytree.utils.Annotator(tre, axes, mark)
# annotate clade
anno.draw_clade_box(
tre.get_tip_labels()[:3],
yspace=tre.treenode.height / 15,
style={
"fill": color,
"fill-opacity": 0.25,
"stroke-width": 2,
"stroke": color,
"stroke-opacity": 0.5,
},
);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Restart the kernel
Step2: Set up your Google Cloud project
Step3: Region
Step4: Timestamp
Step5: Authenticate your Google Cloud account
Step6: Create a Cloud Storage bucket
Step7: Only if your bucket doesn't already exist
Step8: Finally, validate access to your Cloud Storage bucket by examining its contents
Step9: Service Account
Step10: Set service account access for Vertex AI Pipelines
Step11: Import libraries
Step12: Initialize Vertex AI SDK for Python
Step13: Location of Cloud Storage training data.
Step14: Create AutoML model evaluation component
Step15: Construct AutoML training pipeline
Step16: Get test item(s)
Step17: Copy test item(s)
Step18: Make the batch input file
Step19: Compile and execute the pipeline
Step20: View AutoML training pipeline results
Step21: Delete a pipeline job
Step22: Load an endpoint
Step23: Send an online prediction request
Step24: Make the prediction
Step25: Cleaning up
Step26: Delete the Cloud Storage bucket
|
<ASSISTANT_TASK:>
Python Code:
import os
# The Vertex AI Workbench Notebook product has specific requirements
IS_WORKBENCH_NOTEBOOK = os.getenv("DL_ANACONDA_HOME")
IS_USER_MANAGED_WORKBENCH_NOTEBOOK = os.path.exists(
"/opt/deeplearning/metadata/env_version"
)
# Vertex AI Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_WORKBENCH_NOTEBOOK:
USER_FLAG = "--user"
! pip3 install tensorflow-io==0.18 $USER_FLAG -q
! pip3 install --upgrade google-cloud-aiplatform \
google-cloud-pipeline-components \
google-cloud-logging \
pyarrow \
kfp $USER_FLAG -q
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "[your-region]" # @param {type: "string"}
if REGION == "[your-region]":
REGION = "us-central1"
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Vertex AI Workbench, then don't execute this code
IS_COLAB = False
if not os.path.exists("/opt/deeplearning/metadata/env_version") and not os.getenv(
"DL_ANACONDA_HOME"
):
if "google.colab" in sys.modules:
IS_COLAB = True
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"}
BUCKET_URI = f"gs://{BUCKET_NAME}"
if BUCKET_URI == "" or BUCKET_URI is None or BUCKET_URI == "gs://[your-bucket-name]":
BUCKET_URI = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_URI
! gsutil ls -al $BUCKET_URI
SERVICE_ACCOUNT = "[your-service-account]" # @param {type:"string"}
if (
SERVICE_ACCOUNT == ""
or SERVICE_ACCOUNT is None
or SERVICE_ACCOUNT == "[your-service-account]"
):
# Get your service account from gcloud
if not IS_COLAB:
shell_output = !gcloud auth list 2>/dev/null
SERVICE_ACCOUNT = shell_output[2].replace("*", "").strip()
if IS_COLAB:
shell_output = ! gcloud projects describe $PROJECT_ID
project_number = shell_output[-1].split(":")[1].strip().replace("'", "")
SERVICE_ACCOUNT = f"{project_number}-compute@developer.gserviceaccount.com"
print("Service Account:", SERVICE_ACCOUNT)
! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_URI
! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_URI
import base64
import json
import google.cloud.aiplatform as aiplatform
import tensorflow as tf
from kfp import dsl
from kfp.v2 import compiler
from kfp.v2.dsl import Artifact, Input, Output, component
aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_URI)
IMPORT_FILE = (
"gs://cloud-samples-data/vision/automl_classification/flowers/all_data_v2.csv"
)
@component(packages_to_install=["google-cloud-aiplatform"])
def evaluateAutoMLModelOp(
model: Input[Artifact], region: str, model_evaluation: Output[Artifact]
):
import logging
import google.cloud.aiplatform.gapic as gapic
# Get a reference to the Model Service client
client_options = {"api_endpoint": f"{region}-aiplatform.googleapis.com"}
model_service_client = gapic.ModelServiceClient(client_options=client_options)
model_id = model.metadata["resourceName"]
model_evaluations = model_service_client.list_model_evaluations(parent=model_id)
model_evaluation = list(model_evaluations)[0]
logging.info(model_evaluation)
PIPELINE_ROOT = "{}/pipeline_root/automl_icn_training".format(BUCKET_URI)
DEPLOY_COMPUTE = "n1-standard-4"
@dsl.pipeline(
name="automl-icn-training", description="AutoML image classification training"
)
def pipeline(
import_file: str,
batch_files: list,
display_name: str,
bucket: str = PIPELINE_ROOT,
project: str = PROJECT_ID,
region: str = REGION,
):
from google_cloud_pipeline_components import aiplatform as gcc_aip
dataset_op = gcc_aip.ImageDatasetCreateOp(
project=project,
display_name=display_name,
gcs_source=import_file,
import_schema_uri=aiplatform.schema.dataset.ioformat.image.single_label_classification,
)
training_op = gcc_aip.AutoMLImageTrainingJobRunOp(
project=project,
display_name=display_name,
prediction_type="classification",
model_type="CLOUD",
dataset=dataset_op.outputs["dataset"],
model_display_name=display_name,
training_fraction_split=0.6,
validation_fraction_split=0.2,
test_fraction_split=0.2,
budget_milli_node_hours=8000,
)
eval_op = evaluateAutoMLModelOp(model=training_op.outputs["model"], region=region)
batch_op = gcc_aip.ModelBatchPredictOp(
project=project,
job_display_name="batch_predict_job",
model=training_op.outputs["model"],
gcs_source_uris=batch_files,
gcs_destination_output_uri_prefix=bucket,
instances_format="jsonl",
predictions_format="jsonl",
model_parameters={},
machine_type=DEPLOY_COMPUTE,
starting_replica_count=1,
max_replica_count=1,
).after(eval_op)
endpoint_op = gcc_aip.EndpointCreateOp(
project=project,
location=region,
display_name=display_name,
).after(batch_op)
_ = gcc_aip.ModelDeployOp(
model=training_op.outputs["model"],
endpoint=endpoint_op.outputs["endpoint"],
automatic_resources_min_replica_count=1,
automatic_resources_max_replica_count=1,
traffic_split={"0": 100},
)
test_items = !gsutil cat $IMPORT_FILE | head -n2
if len(str(test_items[0]).split(",")) == 3:
_, test_item_1, test_label_1 = str(test_items[0]).split(",")
_, test_item_2, test_label_2 = str(test_items[1]).split(",")
else:
test_item_1, test_label_1 = str(test_items[0]).split(",")
test_item_2, test_label_2 = str(test_items[1]).split(",")
print(test_item_1, test_label_1)
print(test_item_2, test_label_2)
file_1 = test_item_1.split("/")[-1]
file_2 = test_item_2.split("/")[-1]
! gsutil cp $test_item_1 $BUCKET_URI/$file_1
! gsutil cp $test_item_2 $BUCKET_URI/$file_2
test_item_1 = BUCKET_URI + "/" + file_1
test_item_2 = BUCKET_URI + "/" + file_2
gcs_input_uri = BUCKET_URI + "/test.jsonl"
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
data = {"content": test_item_1, "mime_type": "image/jpeg"}
f.write(json.dumps(data) + "\n")
data = {"content": test_item_2, "mime_type": "image/jpeg"}
f.write(json.dumps(data) + "\n")
print(gcs_input_uri)
! gsutil cat $gcs_input_uri
compiler.Compiler().compile(
pipeline_func=pipeline, package_path="automl_icn_training.json"
)
pipeline = aiplatform.PipelineJob(
display_name="automl_icn_training",
template_path="automl_icn_training.json",
pipeline_root=PIPELINE_ROOT,
parameter_values={
"import_file": IMPORT_FILE,
"batch_files": [gcs_input_uri],
"display_name": "flowers" + TIMESTAMP,
"project": PROJECT_ID,
"region": REGION,
},
)
pipeline.run()
! rm -f automl_icn_training.json
PROJECT_NUMBER = pipeline.gca_resource.name.split("/")[1]
print(PROJECT_NUMBER)
def print_pipeline_output(job, output_task_name):
JOB_ID = job.name
print(JOB_ID)
for _ in range(len(job.gca_resource.job_detail.task_details)):
TASK_ID = job.gca_resource.job_detail.task_details[_].task_id
EXECUTE_OUTPUT = (
PIPELINE_ROOT
+ "/"
+ PROJECT_NUMBER
+ "/"
+ JOB_ID
+ "/"
+ output_task_name
+ "_"
+ str(TASK_ID)
+ "/executor_output.json"
)
GCP_RESOURCES = (
PIPELINE_ROOT
+ "/"
+ PROJECT_NUMBER
+ "/"
+ JOB_ID
+ "/"
+ output_task_name
+ "_"
+ str(TASK_ID)
+ "/gcp_resources"
)
EVAL_METRICS = (
PIPELINE_ROOT
+ "/"
+ PROJECT_NUMBER
+ "/"
+ JOB_ID
+ "/"
+ output_task_name
+ "_"
+ str(TASK_ID)
+ "/evaluation_metrics"
)
if tf.io.gfile.exists(EXECUTE_OUTPUT):
! gsutil cat $EXECUTE_OUTPUT
return EXECUTE_OUTPUT
elif tf.io.gfile.exists(GCP_RESOURCES):
! gsutil cat $GCP_RESOURCES
return GCP_RESOURCES
elif tf.io.gfile.exists(EVAL_METRICS):
! gsutil cat $EVAL_METRICS
return EVAL_METRICS
return None
print("image-dataset-create")
artifacts = print_pipeline_output(pipeline, "image-dataset-create")
print("\n\n")
print("automl-image-training-job")
artifacts = print_pipeline_output(pipeline, "automl-image-training-job")
print("\n\n")
output = !gsutil cat $artifacts
output = json.loads(output[0])
model_id = output["artifacts"]["model"]["artifacts"][0]["metadata"]["resourceName"]
print("\n")
print(model_id)
print("endpoint-create")
artifacts = print_pipeline_output(pipeline, "endpoint-create")
print("\n\n")
output = !gsutil cat $artifacts
output = json.loads(output[0])
endpoint_id = output["artifacts"]["endpoint"]["artifacts"][0]["metadata"][
"resourceName"
]
print("\n")
print(endpoint_id)
print("model-deploy")
artifacts = print_pipeline_output(pipeline, "model-deploy")
print("\n\n")
print("evaluateautomlmodelop")
artifacts = print_pipeline_output(pipeline, "evaluateautomlmodelop")
print("\n\n")
print("model-batch-predict")
artifacts = print_pipeline_output(pipeline, "model-batch-predict")
output = !gsutil cat $artifacts
output = json.loads(output[0])
print("\n\n")
print(
output["artifacts"]["batchpredictionjob"]["artifacts"][0]["metadata"][
"gcsOutputDirectory"
]
)
output = !gsutil cat $artifacts
output = json.loads(output[0])
batch_job_id = output["artifacts"]["batchpredictionjob"]["artifacts"][0]["metadata"][
"resourceName"
]
pipeline.delete()
endpoint = aiplatform.Endpoint(endpoint_id)
test_item = !gsutil cat $IMPORT_FILE | head -n1
if len(str(test_item[0]).split(",")) == 3:
_, test_item, test_label = str(test_item[0]).split(",")
else:
test_item, test_label = str(test_item[0]).split(",")
print(test_item, test_label)
with tf.io.gfile.GFile(test_item, "rb") as f:
content = f.read()
# The format of each instance should conform to the deployed model's prediction input schema.
instances = [{"content": base64.b64encode(content).decode("utf-8")}]
prediction = endpoint.predict(instances=instances)
print(prediction)
endpoint.undeploy_all()
endpoint.delete()
model = aiplatform.Model(model_id)
model.delete()
batch_job = aiplatform.BatchPredictionJob(batch_job_id)
batch_job.delete()
delete_bucket = False
if delete_bucket or os.getenv("IS_TESTING"):
! gsutil rm -r $BUCKET_URI
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prior to the instantiation of the backend we need to configure the credentials, the S3 storage folder and the device to be used (in the example the State Vector Simulator SV1)
Step2: Next we instantiate the engine with the AWSBraketBackend including the credentials and S3 configuration. By setting the 'use_hardware' parameter to False we indicate the use of the Simulator. In addition we set the number of times we want to run the circuit and the interval in seconds to ask for the results. For a complete list of parameters and descriptions, please check the documentation.
Step3: We can now allocate the required qubits and create the circuit to be run. With the last instruction we ask the backend to run the circuit.
Step4: The backend will automatically create the task and generate a unique identifier (the task Arn) that can be used to recover the status of the task and results later on.
Step5: Retrieve results from a previous execution
Step6: We can plot a histogram with the probabilities as well.
|
<ASSISTANT_TASK:>
Python Code:
from projectq import MainEngine
from projectq.backends import AWSBraketBackend
from projectq.ops import Measure, H, C, X, All
creds = {
'AWS_ACCESS_KEY_ID': 'aws_access_key_id',
'AWS_SECRET_KEY': 'aws_secret_key',
} # replace with your Access key and Secret key
s3_folder = ['S3Bucket', 'S3Directory'] # replace with your S3 bucket and directory
device = 'SV1' # replace by the device you want to use
eng = MainEngine(AWSBraketBackend(use_hardware=False,
credentials=creds,
s3_folder=s3_folder,
num_runs=10,
interval=10))
# Allocate the required qubits
qureg = eng.allocate_qureg(3)
# Create the circuit. In this example, a quantum teleportation algorithm that teleports the first qubit to the third one.
H | qureg[0]
H | qureg[1]
C(X) | (qureg[1], qureg[2])
C(X) | (qureg[0], qureg[1])
H | qureg[0]
C(X) | (qureg[1], qureg[2])
# At the end we measure the qubits to get the results; should be all-0 or all-1
All(Measure) | qureg
# And run the circuit
eng.flush()
# Obtain and print the probabilities of the states
prob_dict = eng.backend.get_probabilities(qureg)
print("Probabilities for each of the results: ", prob_dict)
# Set the Task Arn of the job to be retrieved and instantiate the engine with the AWSBraketBackend
task_arn = 'your_task_arn' # replace with the actual TaskArn you want to use
eng1 = MainEngine(AWSBraketBackend(retrieve_execution=task_arn, credentials=creds, num_retries=2, verbose=True))
# Configure the qubits to get the state probabilities
qureg1 = eng1.allocate_qureg(3)
# Ask the backend to retrieve the results
eng1.flush()
# Obtain and print the probabilities of the states
prob_dict1 = eng1.backend.get_probabilities(qureg1)
print("Probabilities ", prob_dict1)
import matplotlib.pyplot as plt
%matplotlib inline
from projectq.libs.hist import histogram
histogram(eng1.backend, qureg1)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Local Training
Step2: Vertex Training using Vertex SDK and Custom Container
Step3: Initialize Vertex SDK
Step4: Create a Vertex Tensorboard Instance
Step5: Option
Step6: Training Output Artifact
Step7: Clean Up Artifact
|
<ASSISTANT_TASK:>
Python Code:
PROJECT_ID = "YOUR PROJECT ID"
BUCKET_NAME = "gs://YOUR BUCKET NAME"
REGION = "YOUR REGION"
SERVICE_ACCOUNT = "YOUR SERVICE ACCOUNT"
! gsutil ls -al $BUCKET_NAME
content_name = "pt-img-cls-multi-node-ddp-cust-cont"
! ls trainer
! cat trainer/requirements.txt
! pip install -r trainer/requirements.txt
! cat trainer/task.py
%run trainer/task.py --epochs 5 --no-cuda --local-mode
! ls ./tmp
! rm -rf ./tmp
hostname = "gcr.io"
image_name = content_name
tag = "latest"
custom_container_image_uri = f"{hostname}/{PROJECT_ID}/{image_name}:{tag}"
! cd trainer && docker build -t $custom_container_image_uri -f Dockerfile .
! docker run --rm $custom_container_image_uri --epochs 5 --no-cuda --local-mode
! docker push $custom_container_image_uri
! gcloud container images list --repository $hostname/$PROJECT_ID
! pip install -r requirements.txt
from google.cloud import aiplatform
aiplatform.init(
project=PROJECT_ID,
staging_bucket=BUCKET_NAME,
location=REGION,
)
content_name = content_name + "-cpu"
tensorboard = aiplatform.Tensorboard.create(
display_name=content_name,
)
display_name = content_name
gcs_output_uri_prefix = f"{BUCKET_NAME}/{display_name}"
replica_count = 4
machine_type = "n1-standard-4"
args = [
"--backend",
"gloo",
"--no-cuda",
"--batch-size",
"128",
"--epochs",
"25",
]
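# Note (an aside): the "gloo" backend is chosen because these replicas are
# CPU-only machines; for GPU workers "nccl" is the usual DDP backend.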
custom_container_training_job = aiplatform.CustomContainerTrainingJob(
display_name=display_name,
container_uri=custom_container_image_uri,
)
custom_container_training_job.run(
args=args,
base_output_dir=gcs_output_uri_prefix,
replica_count=replica_count,
machine_type=machine_type,
tensorboard=tensorboard.resource_name,
service_account=SERVICE_ACCOUNT,
)
print(f"Custom Training Job Name: {custom_container_training_job.resource_name}")
print(f"GCS Output URI Prefix: {gcs_output_uri_prefix}")
! gsutil ls $gcs_output_uri_prefix
! gsutil rm -rf $gcs_output_uri_prefix
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the data
Step2: Plot the data
Step3: Identify first order model
Step4: Validation
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
import control
!wget https://alfkjartan.github.io/files/sysid_hw_data.mat
data = sio.loadmat("sysid_hw_data.mat")
N = len(data["u1"])
plt.figure(figsize=(14,1.7))
plt.step(range(N),data["u1"])
plt.ylabel("u_1")
plt.figure(figsize=(14,1.7))
plt.step(range(N),data["y1"])
plt.ylabel("y_1")
data["u1"].size
y = np.ravel(data["y1"])
u = np.ravel(data["u1"])
Phi = np.array([-y[1:N-1],
u[1:N-1],
u[:N-2]]).T
yy = y[2:]
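# The regressor matrix encodes the first-order ARX model
# y[t] = -a*y[t-1] + b0*u[t-1] + b1*u[t-2],
# so least squares on (Phi, yy) recovers theta = [a, b0, b1].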
theta_ls = np.linalg.lstsq(Phi, yy)
theta_ls
print("Estimated: a = %f" % theta_ls[0][0])
print("Estimated: b_0 = %f" % theta_ls[0][1])
print("Estimated: b_1 = %f" % theta_ls[0][2])
# Import the predict_lti function which will calculate the k-step ahead prediction.
from lti_fcns import predict_lti
yv = np.ravel(data["y1_val"])
uv = np.ravel(data["u1_val"])
k = 8 # The prediction horizon
d = 1 # The input delay of the system
a = np.array([1, theta_ls[0][0]])
b = np.ravel(theta_ls[0][1:])
(ypred, tpred) = predict_lti(b,a,yv, uv, k, d)
N = len(uv)
plt.figure(figsize=(14,3))
plt.step(range(N), yv)
plt.plot(tpred, ypred, 'ro')
# Calculate the Root Mean Square Error (RMSE) and fit (in %)
err = yv[tpred[0]:] - ypred
RMSE = np.sqrt(1.0/N * np.sum( np.square(yv[tpred[0]:] - ypred)))
fit = 100 * (1 - np.linalg.norm(err)/np.linalg.norm(yv - np.mean(yv)))
plt.title("RMSE = %f, fit = %f %%" % (RMSE, fit))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def find_max(words):
return sorted(words, key = lambda x: (-len(set(x)), x))[0]
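# quick illustration (an added example, not part of the original task):
# "string" has the most unique letters (6); ties would be broken by taking
# the alphabetically first word.
# find_max(["name", "of", "string"]) -> "string"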
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: README
Step2: Other parameters to set
Step3: Pass hits to mothur aligner
Step4: Get aligned seqs that have > 50% matched to references
Step5: Search is done here (the computationally intensive part). Hooray!
Step6: Classify SSU rRNA gene seqs using SILVA
Step7: Classify SSU rRNA gene seqs with Greengene for copy correction later
Step8: This part of the pipeline (working with one sequence file) finishes here. Next we will combine samples for community analysis (see unsupervised analysis).
|
<ASSISTANT_TASK:>
Python Code:
cd ~/Desktop/SSUsearch/
mkdir -p ./workdir
#check seqfile files to process in data directory (make sure you still remember the data directory)
!ls ./data/test/data
Seqfile='./data/test/data/1c.fa'
Cpu='1' # number of maxixum threads for search and alignment
Hmm='./data/SSUsearch_db/Hmm.ssu.hmm' # hmm model for ssu
Gene='ssu'
Script_dir='./scripts'
Gene_model_org='./data/SSUsearch_db/Gene_model_org.16s_ecoli_J01695.fasta'
Ali_template='./data/SSUsearch_db/Ali_template.silva_ssu.fasta'
Start='577' #pick regions for de novo clustering
End='727'
Len_cutoff='100' # min length for reads picked for the region
Gene_tax='./data/SSUsearch_db/Gene_tax.silva_taxa_family.tax' # silva 108 ref
Gene_db='./data/SSUsearch_db/Gene_db.silva_108_rep_set.fasta'
Gene_tax_cc='./data/SSUsearch_db/Gene_tax_cc.greengene_97_otus.tax' # greengene 2012.10 ref for copy correction
Gene_db_cc='./data/SSUsearch_db/Gene_db_cc.greengene_97_otus.fasta'
# first part of file basename will the label of this sample
import os
Filename=os.path.basename(Seqfile)
Tag=Filename.split('.')[0]
import os
New_path = '{}:{}'.format('~/Desktop/SSUsearch/external_tools/bin/', os.environ['PATH'])
Hmm=os.path.abspath(Hmm)
Seqfile=os.path.abspath(Seqfile)
Script_dir=os.path.abspath(Script_dir)
Gene_model_org=os.path.abspath(Gene_model_org)
Ali_template=os.path.abspath(Ali_template)
Gene_tax=os.path.abspath(Gene_tax)
Gene_db=os.path.abspath(Gene_db)
Gene_tax_cc=os.path.abspath(Gene_tax_cc)
Gene_db_cc=os.path.abspath(Gene_db_cc)
os.environ.update(
{'PATH':New_path,
'Cpu':Cpu,
'Hmm':os.path.abspath(Hmm),
'Gene':Gene,
'Seqfile':os.path.abspath(Seqfile),
'Filename':Filename,
'Tag':Tag,
'Script_dir':os.path.abspath(Script_dir),
'Gene_model_org':os.path.abspath(Gene_model_org),
'Ali_template':os.path.abspath(Ali_template),
'Start':Start,
'End':End,
'Len_cutoff':Len_cutoff,
'Gene_tax':os.path.abspath(Gene_tax),
'Gene_db':os.path.abspath(Gene_db),
'Gene_tax_cc':os.path.abspath(Gene_tax_cc),
'Gene_db_cc':os.path.abspath(Gene_db_cc)})
!echo "*** make sure: parameters are right"
!echo "Seqfile: $Seqfile\nCpu: $Cpu\nFilename: $Filename\nTag: $Tag"
cd workdir
mkdir -p $Tag.ssu.out
### start hmmsearch
%%bash
echo "*** hmmsearch starting"
time hmmsearch --incE 10 --incdomE 10 --cpu $Cpu \
--domtblout $Tag.ssu.out/$Tag.qc.$Gene.hmmdomtblout \
-o /dev/null -A $Tag.ssu.out/$Tag.qc.$Gene.sto \
$Hmm $Seqfile
echo "*** hmmsearch finished"
!python $Script_dir/get-seq-from-hmmout.py \
$Tag.ssu.out/$Tag.qc.$Gene.hmmdomtblout \
$Tag.ssu.out/$Tag.qc.$Gene.sto \
$Tag.ssu.out/$Tag.qc.$Gene
%%bash
echo "*** Starting mothur align"
cat $Gene_model_org $Tag.ssu.out/$Tag.qc.$Gene > $Tag.ssu.out/$Tag.qc.$Gene.RFadded
# mothur does not allow tab between its flags, thus no indents here
time mothur "#align.seqs(candidate=$Tag.ssu.out/$Tag.qc.$Gene.RFadded, template=$Ali_template, threshold=0.5, flip=t, processors=$Cpu)"
rm -f mothur.*.logfile
!python $Script_dir/mothur-align-report-parser-cutoff.py \
$Tag.ssu.out/$Tag.qc.$Gene.align.report \
$Tag.ssu.out/$Tag.qc.$Gene.align \
$Tag.ssu.out/$Tag.qc.$Gene.align.filter \
0.5
!python $Script_dir/remove-gap.py $Tag.ssu.out/$Tag.qc.$Gene.align.filter $Tag.ssu.out/$Tag.qc.$Gene.align.filter.fa
!python $Script_dir/region-cut.py $Tag.ssu.out/$Tag.qc.$Gene.align.filter $Start $End $Len_cutoff
!mv $Tag.ssu.out/$Tag.qc.$Gene.align.filter."$Start"to"$End".cut.lenscreen $Tag.ssu.out/$Tag.forclust
%%bash
rm -f $Tag.ssu.out/$Tag.qc.$Gene.align.filter.silva_taxa_family*.taxonomy
mothur "#classify.seqs(fasta=$Tag.ssu.out/$Tag.qc.$Gene.align.filter.fa, template=$Gene_db, taxonomy=$Gene_tax, cutoff=50, processors=$Cpu)"
mv $Tag.ssu.out/$Tag.qc.$Gene.align.filter.silva_taxa_family*.taxonomy \
$Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.silva.taxonomy
!python $Script_dir/count-taxon.py \
$Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.silva.taxonomy \
$Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.silva.taxonomy.count
!rm -f mothur.*.logfile
%%bash
rm -f $Tag.ssu.out/$Tag.qc.$Gene.align.filter.greengene_97_otus*.taxonomy
mothur "#classify.seqs(fasta=$Tag.ssu.out/$Tag.qc.$Gene.align.filter.fa, template=$Gene_db_cc, taxonomy=$Gene_tax_cc, cutoff=50, processors=$Cpu)"
mv $Tag.ssu.out/$Tag.qc.$Gene.align.filter.greengene_97_otus*.taxonomy \
$Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.gg.taxonomy
!python $Script_dir/count-taxon.py \
$Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.gg.taxonomy \
$Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.gg.taxonomy.count
!rm -f mothur.*.logfile
# check the output directory
!ls $Tag.ssu.out
!echo "*** pipeline runs successsfully :)"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. list<a name="2list"></a>
Step2: list2
Step3: list3
Step4: list4
Step5: string
Step6: 3. array<a name="3array"></a>
Step7: some functions about array
Step8: some functions to create special arrays with <font color='seagreen'>NumPy</font>
Step9: <font color='seagreen'>np.linspace (</font> <font color='DarkSlateGray'>a,b,num=50</font> <font color='seagreen'>)</font><a name="linspace"></a>
Step10: <font color='seagreen'>np.zeros(</font> <font color='DarkSlateGray'>shape</font> <font color='seagreen'>)</font><a name="zeros"></a>
Step11: <font color='seagreen'>np.ones(</font> <font color='DarkSlateGray'>shape</font> <font color='seagreen'>)</font><a name="ones"></a>
Step12: <font color='seagreen'>np.eye(</font> <font color='DarkSlateGray'>N</font> <font color='seagreen'>)</font><a name="eye"></a>
Step13: <font color='seagreen'>np.diag(</font> <font color='DarkSlateGray'>v</font> <font color='seagreen'>)</font><a name="diag"></a>
Step14: 4. dictionary<a name="4dictionary"></a>
Step15: Keys are immutable; they can be numbers, strings, or tuples.
Step16: copy a dictionary
Step17: check key in dictionary
Step18: check values in dictionary
Step19: clear a dictionary
Step20: modify a dictionary
Step21: 5. transform<a name="5transform"></a>
Step22: Converting a list / array / dictionary to a tuple
Step23: Converting a tuple / array / dictionary to a list
Step24: Converting a list / tuple to an array
|
<ASSISTANT_TASK:>
Python Code:
# define a tuple
tuple1 = ('bosco','ricky','pinky')
tuple1
# a tuple with a single item
tuple2 = (5,)
tuple2
# assign multiple values at once
x,y,z = tuple1
print(x,y,z)
# test membership with in
'bosco' in tuple1
# indexing
tuple1[0]
# string
tuple("money")
list1 = [16,2,53,24,5,36,67,80]
list1
# indexing
list1[5]
# slicing
list1[:5]
list1[3:]
list1[-3:]
list1[1:6]
list1[-4:-1]
list1[0:6:2]
list1[6:0:-2]
list1[::-3]
# more functions
len(list1)
max(list1)
min(list1)
# change an element
list1[1]=30
list1
# add an element
list1.append(4)
list1
list1.sort()
list1
# counting occurrences
list2 = ['alpha','beta','gamma','gamma','alpha','alpha']
list2.count('alpha')
list2 = [2,56,8,2,9,9,47,2,2]
list2.count(2)
# combine lists
list3 = [56,8,7,91]
list3_add = [15,32,5]
list3.extend(list3_add)
list3
list4 = ['apple','banana','orange','peach']
list4.index('orange') # find the position of this element
list4.insert(2,'cherry') # insert a new element at index 2
list4
list4.pop(1) # remove the element at index 1 (and display the removed element)
list4
list4.remove('orange') # remove this element (does not display the removed element)
list4
list('hey')
sorted('hey')
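# note: sorted() always returns a list, even when given a string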
sentence = "Take it away"
sentence.find('it') # i:5
sen1 = ['hello','world','welcome','so','good']
sen2 = 'miao'
sen2.join(sen1) # join (str method): inserts sen2 between each element of sen1
sentence = "I toOK a Photo"
sentence.lower() # uppercase to lowercase
sentence.upper() # lowercase to uppercase
sentence = "it was my bro"
sentence.replace('bro','sis') # replace a substring
import numpy as np
a = np.array([[18., 17., 16.], [14., 19., 18.]])
a
a.ndim # rank of an array
a.shape # dimension or shape of an array (result is a tuple)
a.size # total number of items
a.dtype # type of item (note: all items in an array have the same type)
np.arange(15)
np.arange(3,9)
np.arange(3,9,2)
np.linspace(3,9,5)
np.zeros(3)
np.zeros((3,2))
np.ones(3)
np.ones((3,2))
np.eye(3)
np.diag([5,3,6])
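# np.diag also works in reverse: given a 2-D array it extracts the diagonal
np.diag(np.eye(3))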
family = {'Dad':'Dickson','Mom':'Sara','Bro':'Bosco'}
family
family['Dad']
dict1 = {'a':5,666:'yoooo',(12,'b'):('c','d')}
dict1
home = family.copy()
home
'Mom' in home
'Sis' in home
'Bosco' in home.values()
family.clear()
family
user = {'name':'Bosco','age':40,'sex':'male'}
user
user['age'] = 32 # modify a value
user
user['city'] = 'New York' # add an item
user
del user['sex'] # delete an item
user
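# get() looks up a key without raising KeyError when it is missing
user.get('sex', 'not set')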
list1 = [5,9,88,26,19]
type(list1)
tuple1 = (75,31,4,60)
type(tuple1)
array1 = np.array([62,8,99,6])
type(array1)
dict1 = {'a':5,666:'yoooo',(12,'b'):('c','d')}
type(dict1)
l_to_t = tuple(list1)
type(l_to_t)
a_to_t = tuple(array1)
type(a_to_t)
# key to tuple
d_to_t = tuple(dict1)
d_to_t
# value to tuple
dv_to_t = tuple(dict1.values())
dv_to_t
t_to_l = list(tuple1)
type(t_to_l)
a_to_l = list(array1)
type(a_to_l)
# key to list
d_to_l = list(dict1)
d_to_l
# value to list
dv_to_l = list(dict1.values())
dv_to_l
l_to_a = np.array(list1)
type(l_to_a)
t_to_a = np.array(tuple1)
type(t_to_a)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: STAR Logs
Step2: Picard Metrics
Step3: Expression Distribution
Step4: We can see overall that there are a fair number of genes that are not expressed
Step5: Protein coding genes are highly enriched for being expressed in all samples.
Step6: All samples
Step7: Clustering
Step8: The ANOVA and PC plots show that the different batches on the flowcells
Step9: TODO
|
<ASSISTANT_TASK:>
Python Code:
import copy
import cPickle
import os
import subprocess
import cdpybio as cpb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.linalg import svd
import scipy.stats as stats
import seaborn as sns
import statsmodels.formula.api as smf
import cardipspy as cpy
import ciepy
%matplotlib inline
%load_ext rpy2.ipython
outdir = os.path.join(ciepy.root, 'output',
'rna_seq_analysis')
cpy.makedir(outdir)
private_outdir = os.path.join(ciepy.root, 'private_output',
'rna_seq_analysis')
cpy.makedir(private_outdir)
sns.set_context('notebook')
sns.set_style('whitegrid')
fn = os.path.join(ciepy.root, 'output', 'input_data', 'wgs_metadata.tsv')
wgs_meta = pd.read_table(fn, index_col=0, squeeze=True)
fn = os.path.join(ciepy.root, 'output', 'input_data', 'rnaseq_metadata.tsv')
rna_meta = pd.read_table(fn, index_col=0)
fn = os.path.join(ciepy.root, 'output', 'input_data', 'subject_metadata.tsv')
subject_meta = pd.read_table(fn, index_col=0)
ext_meta = pd.read_table(os.path.join(ciepy.root, 'output', 'input_data', 'GSE73211.tsv'), index_col=0,
low_memory=False)
tpm = pd.read_table(os.path.join(ciepy.root, 'output', 'input_data', 'rsem_tpm.tsv'), index_col=0,
low_memory=False)
ext_tpm = pd.read_table(os.path.join(ciepy.root, 'output', 'input_data', 'GSE73211_tpm.tsv'), index_col=0,
low_memory=False)
gene_info = pd.read_table(cpy.gencode_gene_info, index_col=0)
fn = os.path.join(ciepy.root, 'output', 'input_data', 'star_logs.tsv')
star_logs = pd.read_table(fn, index_col=0)
fn = os.path.join(ciepy.root, 'output', 'input_data', 'picard_metrics.tsv')
picard = pd.read_table(fn, index_col=0)
meta = rna_meta.merge(subject_meta, left_on='subject_id',
right_index=True, how='inner')
fig, axs = plt.subplots(2, 2)
axs = axs.flatten()
ax = axs[0]
k = 'Number of input reads'
star_logs[k].hist(ax=ax)
ax.set_ylabel('Number of samples')
ax.set_xlabel(k)
ax = axs[1]
k = 'Uniquely mapped reads number'
star_logs[k].hist(ax=ax)
ax.set_ylabel('Number of samples')
ax.set_xlabel(k)
ax = axs[2]
k = 'Uniquely mapped reads %'
star_logs[k].hist(ax=ax)
ax.set_ylabel('Number of samples')
ax.set_xlabel(k)
ax = axs[3]
k = 'Average mapped length'
star_logs[k].hist(ax=ax)
ax.set_ylabel('Number of samples')
ax.set_xlabel(k)
plt.tight_layout()
n = star_logs['Number of input reads'].mean()
print('Average of {:.2f} million read pairs per sample'.format(n / 10**6))
star_logs['Uniquely mapped reads %'].min()
star_logs['Uniquely mapped reads %'].median()
wgs_meta.cell.value_counts()
picard[['MEDIAN_INSERT_SIZE', 'PERCENT_DUPLICATION', u'MEDIAN_5PRIME_TO_3PRIME_BIAS']].hist();
picard.PERCENT_DUPLICATION.median()
picard[['MEDIAN_INSERT_SIZE', 'PERCENT_DUPLICATION', u'MEDIAN_5PRIME_TO_3PRIME_BIAS']].describe()
s = (tpm == 0).sum(axis=1)
print(s.value_counts().head(2))
s.hist(bins=100)
plt.title('Number of samples with zero TPM for each gene')
plt.ylabel('Number of genes')
plt.xlabel('Number of samples');
s = (tpm[gene_info.ix[tpm.index, 'gene_type'] == 'protein_coding'] == 0).sum(axis=1)
print(s.value_counts().head(2))
s.hist(bins=100)
plt.title('Number of samples with zero TPM for each protein coding gene')
plt.ylabel('Number of genes')
plt.xlabel('Number of samples');
plur_markers = ['POU5F1', 'SOX2', 'NANOG', 'ZFP42', 'LIN28A']
diff_markers = ['T', 'EOMES', 'SOX17', 'FOXA2', 'GATA4', 'HAND1',
'CDX2', 'PAX6', 'SOX1', 'EN1']
def get_gene_id(x):
return gene_info[gene_info.gene_name == x].index[0]
plur_markers = pd.Series(plur_markers, index=[get_gene_id(x) for x in plur_markers])
diff_markers = pd.Series(diff_markers, index=[get_gene_id(x) for x in diff_markers])
url = 'http://www.nature.com/nbt/journal/v33/n11/extref/nbt.3387-S5.xlsx'
scorecard = pd.read_excel(url)
scorecard = scorecard.drop(scorecard.columns[2:], axis=1)
scorecard = scorecard[scorecard.gene.apply(lambda x: x in gene_info.gene_name.values)]
scorecard.index = [get_gene_id(x) for x in scorecard.gene]
scorecard = scorecard[scorecard['class'].apply(lambda x: x in ['Mesoderm', 'Pluri'])]
tpm_all = pd.concat([tpm, ext_tpm], axis=1)
tdf = np.log10(tpm_all.loc[scorecard.index].T + 1)
tdf = tdf - tdf.mean()
tdf = tdf / tdf.std()
cs = pd.Series(dict(zip(list(set(ext_meta.cell_type)), sns.color_palette('colorblind')[3:6])))
rc = ([sns.color_palette('colorblind')[2]] * tpm.shape[1]) + list(cs[ext_meta.cell_type])
cs = pd.Series(dict(zip(list(set(scorecard['class'])), sns.color_palette("Set2", 7))))
cc = cs[scorecard['class']]
sns.clustermap(tdf, xticklabels=[], yticklabels=[], col_colors=cc, row_colors=rc);#, col_cluster=False);
# Filter for robustly expressed genes and take log.
tpm_f = tpm[(tpm == 0).sum(axis=1) == 0]
log_tpm = np.log10(tpm_f + 1)
# Mean center.
log_tpm_c = (log_tpm.T - log_tpm.mean(axis=1)).T
# Variance normalize.
log_tpm_n = (log_tpm_c.T / log_tpm_c.std(axis=1)).T
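# Quick sanity check (an addition, not in the original notebook): after
# centering and scaling, each gene (row) should have mean ~0 and std ~1.
print(log_tpm_n.mean(axis=1).abs().max())
print(log_tpm_n.std(axis=1).mean())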
cov = log_tpm.std(axis=1) / log_tpm.mean(axis=1)
cov.sort_values(ascending=False, inplace=True)
cov.hist()
plt.title('Coefficient of variation histogram')
plt.xlabel('Coefficient of variation')
plt.ylabel('Number of genes');
res = cpb.analysis.SVD(log_tpm, scale_variance=True)
res.plot_variance_explained(xtick_start=10, xtick_spacing=20, cumulative=False)
res.plot_variance_explained(num_pc=30)
pc_anova = res.pc_anova(meta[['sex', 'ethnicity_group', 'sequence_id']])
pc_anova.pvalue
fig, ax = res.plot_pc_scatter(
    'PC1', 'PC2',
    color=meta.loc[res.v.index, 'sequence_id'],
    color_name='Flowcell')
fig, ax = res.plot_pc_scatter(
    'PC2', 'PC3',
    color=meta.loc[res.v.index, 'sequence_id'],
    color_name='Flowcell')
fig, ax = res.plot_pc_scatter(
    'PC3', 'PC4',
    color=meta.loc[res.v.index, 'sequence_id'],
    color_name='Flowcell')
a = meta.dropna(subset=['family_id']).drop_duplicates(subset=['family_id'])
b = meta[meta.family_id.isnull()]
euro_no_fam = pd.concat([a, b])
# Filter for robustly expressed genes and take log.
subset_tpm_f = tpm.loc[(tpm == 0).sum(axis=1) == 0, euro_no_fam.index]
subset_log_tpm = np.log10(subset_tpm_f + 1)
# Mean center.
subset_log_tpm_c = (subset_log_tpm.T - subset_log_tpm.mean(axis=1)).T
# Variance normalize.
subset_log_tpm_n = (subset_log_tpm_c.T / subset_log_tpm_c.std(axis=1)).T
subset_cov = subset_log_tpm.std(axis=1) / subset_log_tpm.mean(axis=1)
subset_cov.sort_values(ascending=False, inplace=True)
subset_cov.hist()
plt.title('Coefficient of variation histogram')
plt.xlabel('Coefficient of variation')
plt.ylabel('Number of genes');
subset_svd = cpb.analysis.SVD(subset_log_tpm, scale_variance=True)
subset_svd.plot_variance_explained(xtick_start=10, xtick_spacing=10, cumulative=False)
subset_svd.plot_variance_explained(num_pc=30)
fig, ax = subset_svd.plot_pc_scatter(
    'PC1', 'PC2',
    color=meta.loc[subset_svd.v.index, 'sequence_id'],
    marker=meta.loc[subset_svd.v.index, 'sex'],
    color_name='Flowcell',
    marker_name='Sex')
subset_svd_500 = cpb.analysis.SVD(subset_log_tpm_n.loc[subset_cov.head(500).index])
fig, ax = subset_svd_500.plot_pc_scatter(
    'PC1', 'PC2',
    color=meta.loc[subset_svd.v.index, 'sequence_id'],
    marker=meta.loc[subset_svd.v.index, 'sex'],
    color_name='Flowcell',
    marker_name='Sex')
t = subset_log_tpm_n.loc[subset_cov.head(500).index]
ct = t.corr(method='spearman')
cg = sns.clustermap(
    ct, xticklabels=False, yticklabels=False,
    row_colors=color_meta.loc[ct.index, ['sex']].T.values,
    col_colors=color_meta.loc[ct.index, ['sequence_id']].T.values)
tdf = subset_log_tpm_n.loc[subset_cov.head(500).index]
fn = os.path.join(outdir, 'subset_sparse_pca.tsv')
p = os.path.join(outdir, 'subset_sparse_pca.pickle')
if not os.path.exists(fn):
from sklearn import decomposition
sparse_pca = decomposition.SparsePCA(n_components=5, n_jobs=30)
sparse_pca.fit(tdf.T)
ind = ['PC{}'.format(x) for x in
range(1, sparse_pca.components_.shape[0] + 1)]
sparse_components = pd.DataFrame(sparse_pca.components_,
columns=tdf.index,
index=ind).T
sparse_components.to_csv(fn, sep='\t')
cPickle.dump(sparse_pca, open(p, 'wb'))
else:
    sparse_components = pd.read_table(fn, index_col=0, header=0)
sparse_pca = cPickle.load(open(p, 'rb'))
#plt.scatter(subset_svd_500.u['PC1'], sparse_pca.components_[0], lw=0, alpha=0.8)
ax = sns.jointplot(subset_svd_500.u['PC1'], sparse_pca.components_[0],
stat_func=None, alpha=0.25)
ax.set_axis_labels(xlabel='PCA loading',
                   ylabel='Sparse PCA loading')
plt.tight_layout()
#plt.title('PCA vs. sparse PCA first eigenvector');
sparse_pca.components_[0].max()
ax = sns.jointplot(x=subset_svd_500.u['PC1'].values, y=sparse_pca.components_[0],
stat_func=None, alpha=0.5)
ax.set_axis_labels(xlabel='PCA eigenvector',
ylabel='Sparse PCA eigenvector')
plt.tight_layout()
genes = [x.split('.')[0] for x in subset_cov.index]
sig = np.array([False] * len(genes))
sig[0:500] = True
subset_var_go_results = cpb.analysis.goseq_gene_enrichment(genes, sig)
genes = [x.split('.')[0] for x in subset_cov.index]
sig = pd.Series(np.array([False] * len(genes)), index=subset_cov.index)
se = pd.Series(sparse_pca.components_[0], index=tdf.index)
sig[se[se != 0].index] = True
spc1_var_go_results = cpb.analysis.goseq_gene_enrichment(genes, sig.values)
spc1_var_go_results.head()
se = pd.Series(sparse_pca.components_[0], index=tdf.index)
t = subset_log_tpm_n.loc[se[se != 0].index]
cg = sns.clustermap(
    t, xticklabels=False, yticklabels=False,
    col_colors=color_meta.loc[ct.index, ['sex', 'sequence_id']].T.values)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Finding the input sets in the model
Step2: Note
Step3: We now want to iterate over the input sets, running the model each time, and retrieving some results for each run.
Step4: A python for loop takes the form
Step5: There are lots of ways to support loop writing, such as the range(n) function, which returns an iterable object that goes from 0 to n-1
Step6: We will loop over the input_sets list, where each item in the list is a Python dictionary. Those dictionaries contain the 'Name' key, which we need to pass to Source
Step7: Now that that's run, let's look at the results
Step8: but we can pick the results for a given input set.
Step9: Note how the results from two calls to v.retrieve_multiple_time_series have been combined into one DataFrame
Step10: There's obviously no visible difference there...
Step11: So there is a difference, albeit small relative to the overall system flow.
Step12: Note
Step13: We can see that $InflowScaling is used by three functions - $CrabSaled, $FishScaled and $ShellScaled. These functions are used to scale the respective inflow timeseries by a single scaling factor.
Step14: Before proceeding, let's reset $InflowScaling to its original value
Step15: Alternatively, we could reset all the functions to the values we retrieved earlier.
Step16: Batch run using functions
Step17: Now we can construct our loop and gather the results.
Step18: Notes
Step19: Modifying time series and piecewise linear functions
Step20: This summary doesn't tell you anything about the details of the values in the variable - although in some cases there is a URL pointing to the details (in the PiecewiseFunction column or the TimeSeries column).
Step21: We can query for either the piecewise function or the time series
Step22: We can update a piecewise linear variable by passing an appropriate dataframe (one with two columns, each with numbers) to v.update_variable_piecewise
Step23: You can check that the change has taken effect by looking in the Functions Manager in Source, or retrieving the piecewise function again
Step24: Updating time series variables works in much the same way - you need to pass a DataFrame with an appropriate structure. In this case, you need a date time index and a single column of values.
Step25: Let's generate a synthetic time series based on the existing sequence.
Step26: So, now we need to apply these monthly factors to every day in the time series.
Step27: So, now we need to take that sequence of values and find the corresponding scaling factors.
Step28: Now, see what happens when we provide a series of months (including duplicates)
Step29: Extending this, and using the list of months for each timestep, we can find the scaling factor for each timestep
Step30: Now, we can multiply the values in the time series by the scaling factors
Step31: We now have an extra column in our DataFrame. Source is expecting one column for our time series
Step32: Modifying input sets
Step33: As you can see, the 0s (separated by spaces) have been replaced with a single %s. When used with the string substitution functionality, this tells Python to expect another string that should be inserted at this point.
Step34: Now, we want to convert our list of numbers (monthly_values) to a string, separated by space. We can use the join method, available on all strings, to do what we want.
Step35: We can now combine this list into the template to create an input set command
Step36: We can now update an input set of choice by adding this command.
Step37: We'll modify the first one in the list
Step38: We can use .append to add a command to the 'Configuration'
Step39: Now, we can update the input set within Source
|
<ASSISTANT_TASK:>
Python Code:
import veneer
v = veneer.Veneer(port=9876)
input_sets = v.input_sets()
input_sets
input_sets.as_dataframe()
things_to_record=[
{'NetworkElement':'Lower Gauge','RecordingVariable':'Downstream Flow Volume'},
{'NetworkElement':'Crop Fields'},
{'NetworkElement':'Recreational Lake','RecordingElement':'StorageVolume'}
]
v.configure_recording(enable=things_to_record)
all_results = {}
for i in [0,1,2,3,4]:
print(i)
for i in range(10):
print(i)
import pandas as pd
for input_set in input_sets:
set_name = input_set['Name']
# Log what's happening
veneer.log('Running ' + set_name)
# Run the model with the current input set
v.run_model(SelectedInputSet=set_name)
# Retrieve the run index so we can pass it to v.retrieve_multiple_time_series
run_index = v.retrieve_run()
# Now, retrieve the results we want
end_of_system_flow = v.retrieve_multiple_time_series(run_data=run_index,criteria={
'NetworkElement':'Lower Gauge','RecordingVariable':'Downstream Flow Volume'
})
crop_time_series = v.retrieve_multiple_time_series(run_data=run_index,criteria={
'NetworkElement':'Crop Fields','RecordingVariable':'.*@Demand Model@.*'
})
all_results[set_name] = pd.merge(end_of_system_flow,crop_time_series,left_index=True,right_index=True)
all_results
all_results['Default Input Set'][0:10]
%pylab inline
import matplotlib.pyplot as plt
for input_set in all_results:
all_results[input_set]['Lower Gauge:Downstream Flow Volume'].plot(label=input_set)
plt.legend()
delta = all_results['Default Input Set']-all_results['Unrestricted Take']
delta
delta['Lower Gauge:Downstream Flow Volume'].plot()
functions = v.functions()
f_df = functions.as_dataframe()
f_df
print(f_df)
v.update_function('$InflowScaling',0.5)
print(v.functions().as_dataframe())
v.update_function('$InflowScaling',1.0)
for fn in functions:
v.update_function(fn['Name'],fn['Expression'])
import numpy as np
NUMBER_OF_SIMULATIONS=50
sampled_scaling_factors = np.random.exponential(size=NUMBER_OF_SIMULATIONS)
sampled_scaling_factors
plt.hist(sampled_scaling_factors)
spill_results=[]
# Store our time series criteria in a variable to use it in configuring recording and retrieving results
ts_match_criteria = {'NetworkElement':'Recreational Lake','RecordingVariable':'Spill Volume'}
v.configure_recording(enable=[ts_match_criteria])
for scaling_factor in sampled_scaling_factors:
veneer.log('Running for $InflowScaling=%f'%scaling_factor)
    # We are running the model many times in this case - so let's drop any results we already have...
v.drop_all_runs()
# Set $InflowScaling to current scaling factor
v.update_function('$InflowScaling',scaling_factor)
v.run_model()
# Retrieve the spill time series, as an annual sum, with the column named for the variable ('Spill Volume')
run_results = v.retrieve_multiple_time_series(criteria=ts_match_criteria,timestep='annual',name_fn=veneer.name_for_variable)
# Store the mean spill volume and the scaling factor we used
spill_results.append({'ScalingFactor':scaling_factor,'SpillVolume':run_results['Spill Volume'].mean()})
# Convert the results to a Data Frame
spill_results_df = pd.DataFrame(spill_results)
spill_results_df
spill_results_df['SpillVolumeGL'] = spill_results_df['SpillVolume'] * 1e-6 # Convert to GL
spill_results_df['SpillVolumeGL'].hist()
variables = v.variables()
variables_df = variables.as_dataframe()
variables_df
variables_df[variables_df.VeneerSupported]
v.variable_piecewise('$PatternPW')
v.variable_time_series('$CrabTS')[::500]
pattern = v.variable_piecewise('$PatternPW')
pattern
pattern['Result'] *= 2.0 # Multiply each value of Result column by 2
pattern
v.update_variable_piecewise('$PatternPW',pattern)
v.variable_piecewise('$PatternPW')
crab_ts = v.variable_time_series('$CrabTS')
crab_ts.plot()
monthly_scaling=[0.6,0.75,1.0,1.0,1.1,1.20,1.20,1.1,1.0,0.8,0.6,0.5]
len(monthly_scaling)
scaling_df = pd.DataFrame(data={'Month':range(1,13),'Scale':monthly_scaling}).set_index('Month')
scaling_df
plt.plot(crab_ts.index.month)
scaling_df.Scale[12]
scaling_df.Scale[[2,5,7,12,12,1]]
scaling_for_timesteps = scaling_df.Scale[crab_ts.index.month].values
plot(scaling_for_timesteps)
crab_ts['ValueScaled'] = crab_ts.Value * scaling_for_timesteps
# Let's plot the first year to see the effect
crab_ts[0:365].plot()
# That's hard to see, so lets look at the difference:
delta = crab_ts.Value-crab_ts.ValueScaled
delta[0:365].plot()
crab_ts.columns
new_ts = crab_ts[['ValueScaled']]
new_ts[0::500]
v.update_variable_time_series('$CrabTS',new_ts)
template='Nodes.Lake Release.Monthly Pattern=[%s]{ML/d}'
monthly_values=[5,3,4,6,6,7,7,6,5,4,4,5]
list_as_string = ' '.join([str(value) for value in monthly_values])
list_as_string
command = template%(list_as_string)
command
input_sets = v.input_sets()
input_sets.as_dataframe()
the_input_set = input_sets[0]
the_input_set['Configuration']
the_input_set['Configuration'].append(command)
the_input_set['Configuration']
v.update_input_set(the_input_set['Name'],the_input_set)
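# Optional sanity check (an addition; assumes Veneer reflects updates
# immediately): re-fetch the input sets and confirm the command was appended.
updated_sets = v.input_sets()
print(updated_sets[0]['Configuration'][-1])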
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Show event related fields images
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
layout = mne.find_layout(epochs.info, 'meg') # use full layout
title = 'ERF images - MNE sample data'
mne.viz.plot_topo_image_epochs(epochs, layout, sigma=0.5, vmin=-200, vmax=200,
colorbar=True, title=title)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: (Main) Tools
Step2: Python - Making other subjects more lively
|
<ASSISTANT_TASK:>
Python Code:
# As I will attempt to describe in the next slides, Python is an amazing way
# to lead to a more fun learning and teaching experience.
# It can be a basic calculator, a fancy calculator, and more:
# Math, Science, Geography...
# Tools that will help us in that quest are:
# When you bring in SymPy to the picture, things really get awesome. You are
# suddenly writing computer programs which are capable of speaking algebra.
# You are no longer limited to numbers.
# Create graphs from algebraic expressions
from sympy import Symbol, plot
x = Symbol('x')
p = plot(2*x**2 + 2*x + 2)
# Solve equations
from sympy import solve, Symbol
x = Symbol('x')
solve(2*x + 1)
# Limits
from sympy import Symbol, Limit, sin
x = Symbol('x')
Limit(sin(x)/x, x, 0).doit()
# Derivative
from sympy import Symbol, Derivative, sin, init_printing
x = Symbol('x')
init_printing()
Derivative(sin(x)**(2*x+1), x).doit()
# Indefinite integral
from sympy import Symbol, Integral, sqrt, sin, init_printing
x = Symbol('x')
init_printing()
Integral(sqrt(x)).doit()
# Definite integral
from sympy import Symbol, Integral, sqrt
x = Symbol('x')
Integral(sqrt(x), (x, 0, 2)).doit()
### TODO: digit recognition using Neural networks
### Scikitlearn, pandas, scipy, statsmodel
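# A minimal sketch of the TODO above (a hypothetical example, not part of the
# original slides): digit recognition with scikit-learn's bundled dataset.
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier

digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(
    digits.data, digits.target, test_size=0.25, random_state=0)
clf = MLPClassifier(hidden_layer_sizes=(32,), max_iter=500, random_state=0)
clf.fit(X_train, y_train)
print("test accuracy:", clf.score(X_test, y_test))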
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load some Data
Step2: We specify the model without any options and fit it. The summary shows that the data was deseasonalized using the multiplicative method. The drift is modest and negative, and the smoothing parameter is fairly low.
Step3: The model is first and foremost a forecasting method. Forecasts are produced using the forecast method from the fitted model. Below we produce a hedgehog plot by forecasting 2 years ahead every 2 years.
Step4: We could alternatively fit the log of the data. Here it makes more sense to force the deseasonalizing to use the additive method, if needed. We also fit the model parameters using MLE. This method fits the IMA
Step5: The forecast only depends on the forecast trend component,
Step6: Personal Consumption Expenditure
Step7: Since this series is always positive, we model the $\ln$.
Step8: Next we explore differences in the forecast as $\theta$ changes. When $\theta$ is close to 1, the drift is nearly absent. As $\theta$ increases, the drift becomes more obvious.
Step9: Finally, plot_predict can be used to visualize the predictions and prediction intervals which are constructed assuming the IMA is true.
Step10: We conclude by producing a hedgehog plot using 2-year non-overlapping samples.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import pandas_datareader as pdr
import matplotlib.pyplot as plt
import seaborn as sns
plt.rc("figure",figsize=(16,8))
plt.rc("font",size=15)
plt.rc("lines",linewidth=3)
sns.set_style("darkgrid")
reader = pdr.fred.FredReader(["HOUST"], start="1980-01-01", end="2020-04-01")
data = reader.read()
housing = data.HOUST
housing.index.freq = housing.index.inferred_freq
ax = housing.plot()
from statsmodels.tsa.forecasting.theta import ThetaModel
tm = ThetaModel(housing)
res = tm.fit()
print(res.summary())
forecasts = {"housing": housing}
for year in range(1995, 2020, 2):
sub = housing[:str(year)]
res = ThetaModel(sub).fit()
fcast = res.forecast(24)
forecasts[str(year)] = fcast
forecasts = pd.DataFrame(forecasts)
ax = forecasts["1995":].plot(legend=False)
children = ax.get_children()
children[0].set_linewidth(4)
children[0].set_alpha(0.3)
children[0].set_color("#000000")
ax.set_title("Housing Starts")
plt.tight_layout(pad=1.0)
tm = ThetaModel(np.log(housing), method="additive")
res = tm.fit(use_mle=True)
print(res.summary())
res.forecast_components(12)
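# Hedged aside (based on the statsmodels ThetaModel documentation, not on this
# notebook): the h-step forecast roughly combines the components above as
#   forecast(h) ~ (theta - 1) / theta * trend(h) + ses,
# re-seasonalized when the series was deseasonalized. With the default
# theta = 2 the trend term therefore enters with weight 1/2.
comp = res.forecast_components(12)
theta = 2.0
trend_weight = (theta - 1) / theta
print(trend_weight)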
reader = pdr.fred.FredReader(["NA000349Q"], start="1980-01-01", end="2020-04-01")
pce = reader.read()
pce.columns = ["PCE"]
_ = pce.plot()
mod = ThetaModel(np.log(pce))
res = mod.fit()
print(res.summary())
forecasts = pd.DataFrame({"ln PCE":np.log(pce.PCE),
"theta=1.2": res.forecast(12, theta=1.2),
"theta=2": res.forecast(12),
"theta=3": res.forecast(12, theta=3),
"No damping": res.forecast(12, theta=np.inf)
})
_ = forecasts.tail(36).plot()
plt.title("Forecasts of ln PCE")
plt.tight_layout(pad=1.0)
ax = res.plot_predict(24, theta=2)
ln_pce = np.log(pce.PCE)
forecasts = {"ln PCE": ln_pce}
for year in range(1995,2020,3):
sub = ln_pce[:str(year)]
res = ThetaModel(sub).fit()
fcast = res.forecast(12)
forecasts[str(year)] = fcast
forecasts = pd.DataFrame(forecasts)
ax = forecasts["1995":].plot(legend=False)
children = ax.get_children()
children[0].set_linewidth(4)
children[0].set_alpha(0.3)
children[0].set_color("#000000")
ax.set_title("ln PCE")
plt.tight_layout(pad=1.0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question 1
Step2: Question 2
Step3: Question 3
Step4: Question 4
Step5: Question 5
Step6: Question 6
Step7: Question 7
Step8: Question 8
Step9: Question 9
Step10: Question 10
Step11: Question 11
Step12: Question 12
Step13: Question 13
Step14: Question 14
|
<ASSISTANT_TASK:>
Python Code:
restaurants = pd.read_csv("NYC_Restaurants.csv", dtype=str)
for index, item in enumerate(restaurants.columns.values):
    print(index, item)
#use .apply() method to combine the 4 columns to get the unique restaurant name
restaurants["RESTAURANT"] = restaurants[["DBA", "BUILDING", "STREET", "ZIPCODE"]].\
apply(lambda x: "{} {} {} {}".format(x[0], x[1], x[2], x[3]), axis=1)
#incase that the RESTAURANT names contain spaces or symbols, strip off them
restaurants["RESTAURANT"] = restaurants["RESTAURANT"].map(lambda y: y.strip())
print restaurants["RESTAURANT"][:10]
print "There are", len(restaurants.drop_duplicates(subset="RESTAURANT")["RESTAURANT"].value_counts()), "restaurants in the data."
num_chain = sum(restaurants.drop_duplicates(subset="RESTAURANT")["DBA"].value_counts()>=2)
print "There are", num_chain, "chain restaurants."
chains = restaurants.drop_duplicates(subset="RESTAURANT")["DBA"].value_counts()[: num_chain].index.values
def chain(restaurant):
return (restaurant in chains)
mask = restaurants["DBA"].map(chain)
restaurants[mask].drop_duplicates(subset="RESTAURANT")["DBA"].value_counts()[:20].plot(kind="bar")
print "The percentage of chain restaurants is",
print "{:.2%}".format(sum(restaurants.drop_duplicates(subset="RESTAURANT")["DBA"].value_counts()>=2)/float(len(restaurants["RESTAURANT"].value_counts())))
def lower_case(X):
return X.lower()
mask_1 = restaurants["DBA"].map(chain)
mask_2 = restaurants["BORO"].map(lower_case) != "missing"
restaurants[~mask_1].drop_duplicates(subset="RESTAURANT")[mask_2]["BORO"].value_counts().sort_values(ascending=False).plot(kind="bar")
series_tmp_1 = restaurants[mask_2].drop_duplicates(subset="RESTAURANT")["BORO"].value_counts()
series_tmp_2 = restaurants[~mask_1][mask_2].drop_duplicates(subset="RESTAURANT")["BORO"].value_counts()
series_tmp_ratio = series_tmp_2/series_tmp_1
series_tmp_ratio.sort_values(ascending=False).plot(kind="bar")
print "The highest non-chain/total ratio is:", "{:0.2%} ({})".format(series_tmp_ratio.sort_values(ascending=False)[0],\
series_tmp_ratio.sort_values(ascending=False).index.values[0])
if series_tmp_ratio.sort_values(ascending=False).index.values[0] !=\
restaurants[-mask_1].drop_duplicates(subset="RESTAURANT")[mask_2]["BORO"].value_counts().sort_values(ascending=False).index.values[0]:
print "It is not the same borough."
else:
print "It is the same borough."
restaurants.drop_duplicates(subset="RESTAURANT")["CUISINE DESCRIPTION"].value_counts()\
.sort_values(ascending=False)[:20].plot(kind="bar")
non_clean_restaurants = restaurants[~restaurants["VIOLATION CODE"].isnull()]["RESTAURANT"].value_counts().index.values
def is_clean(restaurant, blacklist=non_clean_restaurants):
return restaurant not in blacklist
mask_clean = restaurants["RESTAURANT"].map(is_clean)
restaurants[mask_clean]["CUISINE DESCRIPTION"].value_counts().sort_values(ascending=False)[:20].plot(kind="bar")
top_cuisine_series = restaurants.drop_duplicates(subset=["RESTAURANT","CUISINE DESCRIPTION"])["CUISINE DESCRIPTION"].value_counts()
def is_top_cuisine(cuisine):
return top_cuisine_series[cuisine]>=20
mask_3 = restaurants["VIOLATION CODE"].isnull()
mask_4 = restaurants["CUISINE DESCRIPTION"].map(is_top_cuisine)
series_tmp_3 = restaurants[mask_4][mask_3]["CUISINE DESCRIPTION"].value_counts()
series_tmp_4 = restaurants[mask_4]["CUISINE DESCRIPTION"].value_counts()
(series_tmp_3/series_tmp_4).sort_values(ascending=False)[:10].plot(kind="bar")
violation_boro_tab = pd.crosstab(
index=restaurants["VIOLATION DESCRIPTION"],
columns=restaurants[restaurants["BORO"]!="Missing"]["BORO"],
dropna=True
)
print "The most common violation in each borough is summarised below:"
violation_boro_tab.idxmax()
violation_frequency_series = restaurants["VIOLATION DESCRIPTION"].value_counts()
violation_boro_norm_tab = violation_boro_tab.apply(lambda x: x.map(float)/violation_frequency_series, axis=0)
print "After normalization, the most common violation in each borough is summarised below:"
violation_boro_norm_tab.idxmax()
restaurants["AREA"] = restaurants["PHONE"].map(lambda x: x[:3])
print "There are",
print sum(restaurants.drop_duplicates(subset=["AREA", "ZIPCODE"])["AREA"].value_counts() == 1),
print "area codes corresponding to only 1 zipcode"
restaurants["STREET TYPE"] = restaurants["STREET"].map(lambda s: s.split()[-1])
restaurants["STREET BASE"] = restaurants["STREET"].map(lambda s: " ".join(s.split()[:-1]))
restaurants["STREET BASE & ZIP"] = restaurants["STREET BASE"].map(lambda s: s+" ") + restaurants["ZIPCODE"]
new_dataframe = pd.concat(
[restaurants["STREET BASE"], restaurants["STREET TYPE"], restaurants["STREET BASE & ZIP"]],
axis=1
)
new_dataframe = new_dataframe[new_dataframe["STREET BASE"].map(lambda s: len(s)>0)].drop_duplicates()
merged_new_dataframe = pd.merge(
new_dataframe,
new_dataframe,
left_on="STREET BASE & ZIP",
right_on="STREET BASE & ZIP",
suffixes=[" 1", " 2"]
)
merged_new_dataframe = merged_new_dataframe[merged_new_dataframe["STREET TYPE 1"] != merged_new_dataframe["STREET TYPE 2"]]
street_name = pd.crosstab(
index=merged_new_dataframe["STREET TYPE 1"],
columns=merged_new_dataframe["STREET TYPE 2"],
dropna=True
)
print "The most common alias for each of the following street type is listed"
street_name.idxmax()[
["AVE", "ST", "RD", "PL", "BOULEARD", "BOULEVARD"]
]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If you have done it right, you should see
Step2: Run the program above, if you haven't already! It just runs for 10 cars.
|
<ASSISTANT_TASK:>
Python Code:
total = 0 # initialise total
for yeargroup in range(6):
prompt = "How many pupils are in year S"+str(yeargroup+1)+": "
pupils = int(input(prompt))
total = total + pupils # add to total
print("Total = ", total)
vauxhall = 0
ford = 0
mazda = 0
for car in range(10):
car_make = input("Enter make of car f, v or m: ")
if car_make == "f":
ford = ford + 1 # it's a ford!
if car_make == "v":
vauxhall = vauxhall + 1 # another vauxhall
if car_make == "m":
mazda += 1 # += 1 is a nice shorthand for adding 1
print("\nYou saw:") # \n forces a new line.
print(str(ford)+" Fords,")
print(str(vauxhall)+" Vauxhalls,")
print(str(mazda)+" Mazdas.")
input("Press Enter to Finish")
# Edit this copy of the program to handle another make of car.
vauxhall = 0
ford = 0
mazda = 0
for car in range(10):
car_make = input("Enter make of car f, v or m: ")
if car_make == "f":
ford = ford + 1 # it's a ford!
if car_make == "v":
        vauxhall = vauxhall + 1 # another vauxhall
if car_make == "m":
mazda += 1 # += 1 is a nice shorthand for adding 1
print("\nYou saw:") # \n forces a new line.
print(str(ford)+" Fords,")
print(str(vauxhall)+" Vauxhalls,")
print(str(mazda)+" Mazdas.")
input("Press Enter to Finish")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Which gives us a nice data frame with weather data for the Oak Ridge National Lab
Step2: Which we can visualize using matplotlib and seaborn
Step3: And ask questions such as
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append(r"..")
import daymetpy
ornl_lat, ornl_long = 35.9313167, -84.3104124
df = daymetpy.daymet_timeseries(lon=ornl_long, lat=ornl_lat, start_year=2012, end_year=2013)
df.head()
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
fig, (ax1, ax2) = plt.subplots(2, figsize=(18, 10), sharex=True)
rolling15day = df.rolling(15).mean()
ax1.fill_between(rolling15day.index, rolling15day.tmin, rolling15day.tmax, alpha=0.5, lw=0)
ax1.plot(df.index, df[['tmax', 'tmin']].mean(axis=1), lw=2, alpha=0.5)
ax1.set_title('Daymet temp at ORNL', fontsize=20)
ax1.set_ylabel(u'Temp. (°C)', fontsize=20)
monthlysum = df.resample("M").sum()
ax2.bar(monthlysum.index, monthlysum.prcp, width=20,)
ax2.set_title('Daymet precip at ORNL', fontsize=20)
ax2.set_ylabel(u'Precip. (mm)', fontsize=20)
fig.tight_layout()
denver_loc = (-104.9903, 39.7392)
miami_loc = (-80.2089, 25.7753)
denver = daymetpy.daymet_timeseries(lon=denver_loc[0], lat=denver_loc[1], start_year=2012, end_year=2014)
miami = daymetpy.daymet_timeseries(lon=miami_loc[0], lat=miami_loc[1], start_year=2012, end_year=2014)
fig, ax1 = plt.subplots(1, figsize=(18, 10))
rolling15day = denver.rolling(15).mean()
ax1.fill_between(rolling15day.index, rolling15day.tmin, rolling15day.tmax,
                 alpha=0.4, lw=0, label='Denver', color=sns.xkcd_palette(['faded green'])[0])
ax1.set_title('Denver vs Miami temps (15 day mean)', fontsize=20)
rolling15day = miami.rolling(15).mean()
ax1.fill_between(rolling15day.index, rolling15day.tmin, rolling15day.tmax,
                 alpha=0.4, lw=0, label='Miami', color=sns.xkcd_palette(['dusty purple'])[0])
ax1.set_ylabel(u'Temp. (°C)', fontsize=20)
fig.tight_layout()
plt.legend(fontsize=20)
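# A quick numeric companion to the plot (an addition; assumes the tmax/tmin
# columns used above): average of daily (tmax + tmin) / 2 per city.
denver_mean = denver[["tmax", "tmin"]].mean(axis=1).mean()
miami_mean = miami[["tmax", "tmin"]].mean(axis=1).mean()
print(u"Denver: %.1f °C, Miami: %.1f °C" % (denver_mean, miami_mean))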
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step4: Data
Step5: a) Description of the queueing system, location, date, and times of data collection
Step6: b) Number of servers attending = S
Step7: c) The mean time between arrivals to the queue (e.g., one customer every 5 min)
Step8: d) The arrival rate of users to the queueing system – λ (customers per unit of time)
Step9: e) The mean service time per customer (e.g., 20 min)
Step10: f) The service rate – μ (customers per unit of time, per server)
Step11: g) The system utilization (as a percentage)
Step12: h) The probability that the system is empty
Step13: i) The probability that all servers are busy
Step14: k) The expected number of users in the queue
Step15: j) The expected number of users in the system
Step16: l) The mean time users spend in the queue
Step17: m) The expected time users spend in the system
Step18: n) If the call rate doubled, what would happen to the system, and what measures should be taken?
Step19: Before thinking about adding servers, a study should be carried out to determine whether the service time can be optimized, since it may be degraded by an inefficient procedure, by slow computing resources, or by a lack of skill on the server's part. Once resources have been optimized, if that is still not enough, the options to evaluate are increasing the number of servers during peak hours or offering something that eases service, such as online pre-service or some extra convenience during low-traffic hours
|
<ASSISTANT_TASK:>
Python Code:
from datetime import datetime, timedelta
from IPython.display import display
from math import factorial
from matplotlib import pyplot as plt
import io
import numpy as np
import pandas as pd
Σ = sum
%matplotlib inline
def timetable(a, b):
return b + timedelta(minutes=int(a))
v_timetable = np.vectorize(timetable)
def sampling_generator(
size: int=20, initial_datetime: datetime=None
) -> np.array:
if initial_datetime is None:
initial_datetime = datetime.now()
    return v_timetable(
        np.random.randint(0, 60, size=size),
        initial_datetime
    )
def mean_diff_time(se_time: pd.Series) -> float:
n = se_time.shape[0]
l_diff = [0] * (n-1)
for i in range(1, n):
j = i-1
l_diff[j] = (se_time[i] - se_time[j]).seconds/60
return np.mean(l_diff)
initial_datetime = datetime.now()
df_chegada = pd.DataFrame({
'entrada': sampling_generator(
initial_datetime=initial_datetime
)
}).sort_values(by='entrada').reset_index(drop=True)
df_saida = pd.DataFrame({
'saída': sampling_generator(
initial_datetime=initial_datetime
)
}).sort_values(by='saída').reset_index(drop=True)
fig, ax = plt.subplots(1, 2)
df_chegada.hist(ax=ax[0])
df_saida.hist(ax=ax[1])
plt.show()
print('tables head')
display(df_chegada.head())
display(df_saida.head())
initial_datetime.strftime('%d de %b de %Y - %l:%M%p')
S = 3
print('S:', S)
μ_chegada = mean_diff_time(df_chegada['entrada'])
# fixing the values for testing
μ_chegada = 3.05263157895
print('μ_chegada:', μ_chegada, 'mins')
λ = 60/μ_chegada
print('λ:', λ)
μ_atendimento = mean_diff_time(df_saida['saída'])
# fixing the values for testing
μ_atendimento = 2.84210526316
print('μ_atendimento:', μ_atendimento, 'mins')
μ = 60/(μ_atendimento*S)
print('μ:', μ, 'customers/hour')
ρ = (λ/(S*μ))*100
print('ρ:', ρ, '%')
P_0 = (
Σ([(((λ/μ)**n)/(factorial(n))) for n in range(S)]) +
(((λ/μ)**S)/(factorial(S) * (1-(λ/(S*μ)))))
)**(-1)
print('P_0:', P_0)
P_ocupados = (((λ/μ)**S)/(factorial(S)*(1-(λ/(S*μ))))) * P_0
print('P_ocupados:', P_ocupados)
L_q = ((((λ/μ)**S)*λ*μ*S)/(factorial(S)*((μ*S-λ)**2)))*P_0
print('L_q:', L_q, 'users in the queue')
L = L_q + (λ/μ)
print('L:', L, 'users in the system')
W_q = L_q/λ
print('W_q:', W_q, 'hours')
W = L/λ
print('W:', W, 'hours')
p_dobro = ((λ*2)/(S*μ))
print('p_dobro:', p_dobro*100, '%')
if p_dobro < 1:
    print('OK: the system reaches stability')
else:
    print('The system does not reach stability')
# test data provided by the professor
t_S = 3
t_λ = 19.655 # customers/hour
t_μ = 7.037 # customers/hour
t_ρ = 0.93 # %
t_P_0 = 0.016570 # 1.657% - probability of 0 customers
# Prob. of S customers in the system 0.060176 -> 6.018%
t_P_ocupados = 0.87251 # 87.251%
t_L_q = 11.78 # customers
t_L = 14.57 # customers - mean number of customers in the system
t_W_q = 0.599 # hours - mean waiting time in the queue
t_W = 0.741 # hours - mean time in the system
np.testing.assert_approx_equal(S, t_S)
np.testing.assert_approx_equal(λ, t_λ, 3)
np.testing.assert_approx_equal(μ, t_μ, 3)
np.testing.assert_approx_equal(ρ, t_ρ*100, 1)
np.testing.assert_approx_equal(P_0, t_P_0, 3)
np.testing.assert_approx_equal(P_ocupados, t_P_ocupados, 3)
np.testing.assert_approx_equal(L_q, t_L_q, 3)
np.testing.assert_approx_equal(L, t_L, 3)
np.testing.assert_approx_equal(W_q, t_W_q, 3)
np.testing.assert_approx_equal(W, t_W, 3)
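# A consolidated helper (an addition, not part of the original solution): the
# same M/M/S formulas used above, packaged as one reusable function.
from math import factorial as fact

def mms_metrics(lam, mu, s):
    r = lam / mu
    p0 = (sum(r**n / fact(n) for n in range(s)) +
          r**s / (fact(s) * (1 - lam / (s * mu))))**(-1)
    lq = (r**s * lam * mu) / (fact(s - 1) * (s * mu - lam)**2) * p0
    return {'rho': lam / (s * mu), 'P0': p0, 'Lq': lq,
            'L': lq + r, 'Wq': lq / lam, 'W': (lq + r) / lam}

print(mms_metrics(λ, μ, S))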
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we will create the states of the model, one uniform and one normal.
Step2: We will then create the model by creating a HiddenMarkovModel instance. Then we will add the states.
Step3: Now we'll add the start states to the model.
Step4: And the transition matrix.
Step5: Finally, we'll add the ending states to the model.
Step6: To finalize the model, we "bake" it.
Step7: Now we'll create a sample sequence using our model.
Step8: Now we'll feed the sequence through a forward algorithm with our model.
Step9: Next we'll do the same, except with a backwards algorithm.
Step10: Then we'll feed the sequence again, through a forward-backward algorithm.
Step11: Finally we'll train our model with our example sequence.
Step12: Then repeat the algorithms we fed the sequence through before on our improved model.
|
<ASSISTANT_TASK:>
Python Code:
import random
from pomegranate import *
random.seed(0)
state1 = State( UniformDistribution(0.0, 1.0), name="uniform" )
state2 = State( NormalDistribution(0, 2), name="normal" )
model = HiddenMarkovModel( name="ExampleModel" )
model.add_state( state1 )
model.add_state( state2 )
model.add_transition( model.start, state1, 0.5 )
model.add_transition( model.start, state2, 0.5 )
model.add_transition( state1, state1, 0.4 )
model.add_transition( state1, state2, 0.4 )
model.add_transition( state2, state2, 0.4 )
model.add_transition( state2, state1, 0.4 )
model.add_transition( state1, model.end, 0.2 )
model.add_transition( state2, model.end, 0.2 )
model.bake()
sequence = model.sample()
print(sequence)
print(model.forward( sequence )[ len(sequence), model.end_index ])
print(model.backward( sequence )[0,model.start_index])
trans, ems = model.forward_backward( sequence )
print(trans)
print(ems)
model.fit( [ sequence ] )
print("Forward")
print(model.forward( sequence )[ len(sequence), model.end_index ])
print()
print("Backward")
print(model.backward( sequence )[ 0,model.start_index ])
print()
trans, ems = model.forward_backward( sequence )
print(trans)
print(ems)
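# Extra (an addition, not in the original walkthrough): pomegranate can also
# decode the most likely hidden state path with the Viterbi algorithm.
logp, path = model.viterbi(sequence)
print(logp)
print([state.name for idx, state in path])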
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We generated an XML which consists of several <page> elements, containing an <image> (the "background" image, i.e. the scanned page) and several text boxes (<text>) with coordinates and the respective text box value. The images themselves have also been extracted from the PDF.
Step2: When you execute the above line you can start up pdf2xml-viewer by visiting http
Step3: Now we can load the XML, parse it and have a look at the third page (the page from which we later want to extract the data).
Step4: Each page consists of an xmlnode which points to the original XML page element, a page number, the page dimensions, an image (the scanned page) and the text boxes
Step5: The text boxes in p['texts'] contain the coordinates and dimensions of each text box as well as the content (value) and a reference to the original XML node.
Step6: The important thing is that we created an ImageProc instance using the scanned page image file that is referred to in the image key of the page p. ImageProc will identify the dimensions of the image file which allows us to calculate the scaling between the image dimensions and the text boxes' coordinate system. It is important to understand that the coordinate system in image space has a different scaling than the coordinate system used to position the text boxes. For example, the image could be scanned with a size of 1000x3000 pixels (iproc_obj.img_w by iproc_obj.img_h) while the text boxes of a page are positioned on a canvas of size 500x1500 units (p['width'] by p['height']).
Step7: This is our image with the detected lines
Step8: We can see that a skew in direction sy (meaning vertical direction) is detected. To fix this, deskew_textboxes is called with our page p as first parameter, then the negated skew (negated in order to "skew back"), the skew type (direction) and the origin point about which the deskewing is done (top left corner).
Step9: As we can see, only the horizontal lines are straightened. Please note that the deskewing is not applied to the original image because this is not necessary for our further processing and hence the repaired lines are now a bit off from the table borders in the original image.
Step10: 5. Detect clusters of vertical lines for identifying the columns of a table
Step11: Here we see the detected clusters marked with different colors
Step12: 6. Find out the row positions of the table by analyzing the y-coordinates' distribution of text boxes
Step13: Next we get the text boxes' top and bottom border positions, cluster them, and calculate the cluster centers.
Step14: Now try to find the first table row by checking when we find the first text box that contains a string which matches a typical row pattern.
Step15: We find the last table row by checking if we find a "big" text box in the lower page area that matches certain words.
Step16: 7. Create a grid of columns and lines
Step17: We should now evaluate our page grid using pdf2xml-viewer. At first, we will need to load the XML of the repaired (deskewed) pages and then the JSON file containing the page grid information for page 3. When we select page 3 we should see an output like this
Step18: Of course you would now usually do a lot of parsing, data clean up and validation because with this kind of messy data it will never be 100% perfect. Still this is a good basis to work on. We can export the data now as CSV and Excel
|
<ASSISTANT_TASK:>
Python Code:
!cd data/ && pdftohtml -c -hidden -xml ALA1934_RR-excerpt.pdf ALA1934_RR-excerpt.pdf.xml
!ls -1 data/
!head -n 30 data/ALA1934_RR-excerpt.pdf.xml
# Serves the files for pdf2xml-viewer; note that this blocks the notebook kernel, so you may prefer to run it in a separate terminal.
!python3 -m http.server 8080 --bind 127.0.0.1
DATAPATH = 'data/'
OUTPUTPATH = 'generated_output/'
INPUT_XML = 'ALA1934_RR-excerpt.pdf.xml'
import os
from pdftabextract.common import read_xml, parse_pages
# Load the XML that was generated with pdftohtml
xmltree, xmlroot = read_xml(os.path.join(DATAPATH, INPUT_XML))
# parse it and generate a dict of pages
pages = parse_pages(xmlroot)
pages[3].keys()
from pprint import pprint
p_num = 3
p = pages[p_num]
print('number', p['number'])
print('width', p['width'])
print('height', p['height'])
print('image', p['image'])
print('the first three text boxes:')
pprint(p['texts'][:3])
import numpy as np
from pdftabextract import imgproc
# get the image file of the scanned page
imgfilebasename = p['image'][:p['image'].rindex('.')]
imgfile = os.path.join(DATAPATH, p['image'])
print("page %d: detecting lines in image file '%s'..." % (p_num, imgfile))
# create an image processing object with the scanned page
iproc_obj = imgproc.ImageProc(imgfile)
# calculate the scaling of the image file in relation to the text boxes coordinate system dimensions
page_scaling_x = iproc_obj.img_w / p['width'] # scaling in X-direction
page_scaling_y = iproc_obj.img_h / p['height'] # scaling in Y-direction
# detect the lines
lines_hough = iproc_obj.detect_lines(canny_kernel_size=3, canny_low_thresh=50, canny_high_thresh=150,
hough_rho_res=1,
hough_theta_res=np.pi/500,
hough_votes_thresh=round(0.2 * iproc_obj.img_w))
print("> found %d lines" % len(lines_hough))
import cv2
# helper function to save an image
def save_image_w_lines(iproc_obj, imgfilebasename):
img_lines = iproc_obj.draw_lines(orig_img_as_background=True)
img_lines_file = os.path.join(OUTPUTPATH, '%s-lines-orig.png' % imgfilebasename)
print("> saving image with detected lines to '%s'" % img_lines_file)
cv2.imwrite(img_lines_file, img_lines)
save_image_w_lines(iproc_obj, imgfilebasename)
from math import radians, degrees
from pdftabextract.common import ROTATION, SKEW_X, SKEW_Y
from pdftabextract.geom import pt
from pdftabextract.textboxes import rotate_textboxes, deskew_textboxes
# find rotation or skew
# the parameters are:
# 1. the minimum threshold in radians for a rotation to be counted as such
# 2. the maximum threshold for the difference between horizontal and vertical line rotation (to detect skew)
# 3. an optional threshold to filter out "stray" lines whose angle is too far apart from the median angle of
# all other lines that go in the same direction (no effect here)
rot_or_skew_type, rot_or_skew_radians = iproc_obj.find_rotation_or_skew(radians(0.5), # uses "lines_hough"
radians(1),
omit_on_rot_thresh=radians(0.5))
# rotate back or deskew text boxes
needs_fix = True
if rot_or_skew_type == ROTATION:
print("> rotating back by %f°" % -degrees(rot_or_skew_radians))
rotate_textboxes(p, -rot_or_skew_radians, pt(0, 0))
elif rot_or_skew_type in (SKEW_X, SKEW_Y):
print("> deskewing in direction '%s' by %f°" % (rot_or_skew_type, -degrees(rot_or_skew_radians)))
deskew_textboxes(p, -rot_or_skew_radians, rot_or_skew_type, pt(0, 0))
else:
needs_fix = False
print("> no page rotation / skew found")
if needs_fix:
# rotate back or deskew detected lines
lines_hough = iproc_obj.apply_found_rotation_or_skew(rot_or_skew_type, -rot_or_skew_radians)
save_image_w_lines(iproc_obj, imgfilebasename + '-repaired')
# save repaired XML (i.e. XML with deskewed textbox positions)
output_files_basename = INPUT_XML[:INPUT_XML.rindex('.')]
repaired_xmlfile = os.path.join(OUTPUTPATH, output_files_basename + '.repaired.xml')
print("saving repaired XML file to '%s'..." % repaired_xmlfile)
xmltree.write(repaired_xmlfile)
from pdftabextract.clustering import find_clusters_1d_break_dist
MIN_COL_WIDTH = 60 # minimum width of a column in pixels, measured in the scanned pages
# cluster the detected *vertical* lines using find_clusters_1d_break_dist as simple clustering function
# (break on distance MIN_COL_WIDTH/2)
# additionally, remove all cluster sections that are considered empty
# a cluster is considered empty when the number of text boxes in it is below 10% of the median number of text boxes
# per cluster section
vertical_clusters = iproc_obj.find_clusters(imgproc.DIRECTION_VERTICAL, find_clusters_1d_break_dist,
remove_empty_cluster_sections_use_texts=p['texts'], # use this page's textboxes
remove_empty_cluster_sections_n_texts_ratio=0.1, # 10% rule
remove_empty_cluster_sections_scaling=page_scaling_x, # the positions are in "scanned image space" -> we scale them to "text box space"
dist_thresh=MIN_COL_WIDTH/2)
print("> found %d clusters" % len(vertical_clusters))
# draw the clusters
img_w_clusters = iproc_obj.draw_line_clusters(imgproc.DIRECTION_VERTICAL, vertical_clusters)
save_img_file = os.path.join(OUTPUTPATH, '%s-vertical-clusters.png' % imgfilebasename)
print("> saving image with detected vertical clusters to '%s'" % save_img_file)
cv2.imwrite(save_img_file, img_w_clusters)
from pdftabextract.clustering import calc_cluster_centers_1d
page_colpos = np.array(calc_cluster_centers_1d(vertical_clusters)) / page_scaling_x
print('found %d column borders:' % len(page_colpos))
print(page_colpos)
# right border of the second column
col2_rightborder = page_colpos[2]
# calculate median text box height
median_text_height = np.median([t['height'] for t in p['texts']])
# get all texts in the first two columns with a "usual" textbox height
# we will only use these text boxes in order to determine the line positions because they are more "stable"
# otherwise, especially the right side of the column header can lead to problems detecting the first table row
text_height_deviation_thresh = median_text_height / 2
texts_cols_1_2 = [t for t in p['texts']
if t['right'] <= col2_rightborder
and abs(t['height'] - median_text_height) <= text_height_deviation_thresh]
from pdftabextract.clustering import zip_clusters_and_values
from pdftabextract.textboxes import border_positions_from_texts, split_texts_by_positions, join_texts
from pdftabextract.common import all_a_in_b, DIRECTION_VERTICAL
# get all textboxes' top and bottom border positions
borders_y = border_positions_from_texts(texts_cols_1_2, DIRECTION_VERTICAL)
# break into clusters using half of the median text height as break distance
clusters_y = find_clusters_1d_break_dist(borders_y, dist_thresh=median_text_height/2)
clusters_w_vals = zip_clusters_and_values(clusters_y, borders_y)
# for each cluster, calculate the median as center
pos_y = calc_cluster_centers_1d(clusters_w_vals)
pos_y.append(p['height'])
print('number of line positions:', len(pos_y))
import re
# a (possibly malformed) population number + space + start of city name
pttrn_table_row_beginning = re.compile(r'^[\d Oo][\d Oo]{2,} +[A-ZÄÖÜ]')
# 1. try to find the top row of the table
texts_cols_1_2_per_line = split_texts_by_positions(texts_cols_1_2, pos_y, DIRECTION_VERTICAL,
alignment='middle',
enrich_with_positions=True)
# go through the texts line per line
for line_texts, (line_top, line_bottom) in texts_cols_1_2_per_line:
line_str = join_texts(line_texts)
if pttrn_table_row_beginning.match(line_str): # check if the line content matches the given pattern
top_y = line_top
break
else:
top_y = 0
top_y
# hints for a footer text box
words_in_footer = ('anzeige', 'annahme', 'ala')
# 2. try to find the bottom row of the table
min_footer_text_height = median_text_height * 1.5
min_footer_y_pos = p['height'] * 0.7
# get all texts in the lower 30% of the page that have are at least 50% bigger than the median textbox height
bottom_texts = [t for t in p['texts']
if t['top'] >= min_footer_y_pos and t['height'] >= min_footer_text_height]
bottom_texts_per_line = split_texts_by_positions(bottom_texts,
pos_y + [p['height']], # always down to the end of the page
DIRECTION_VERTICAL,
alignment='middle',
enrich_with_positions=True)
# go through the texts at the bottom line per line
page_span = page_colpos[-1] - page_colpos[0]
min_footer_text_width = page_span * 0.8
for line_texts, (line_top, line_bottom) in bottom_texts_per_line:
line_str = join_texts(line_texts)
has_wide_footer_text = any(t['width'] >= min_footer_text_width for t in line_texts)
# check if there's at least one wide text or if all of the required words for a footer match
if has_wide_footer_text or all_a_in_b(words_in_footer, line_str):
bottom_y = line_top
break
else:
bottom_y = p['height']
bottom_y
# finally filter the line positions so that only the lines between the table top and bottom are left
page_rowpos = [y for y in pos_y if top_y <= y <= bottom_y]
print("> page %d: %d lines between [%f, %f]" % (p_num, len(page_rowpos), top_y, bottom_y))
from pdftabextract.extract import make_grid_from_positions
grid = make_grid_from_positions(page_colpos, page_rowpos)
n_rows = len(grid)
n_cols = len(grid[0])
print("> page %d: grid with %d rows, %d columns" % (p_num, n_rows, n_cols))
# the page grid is a 2D matrix that contains the rectangular table cells
# this is the top left table cell:
grid[0][0]
from pdftabextract.common import save_page_grids
page_grids_file = os.path.join(OUTPUTPATH, output_files_basename + '.pagegrids_p3_only.json')
print("saving page grids JSON file to '%s'" % page_grids_file)
save_page_grids({p_num: grid}, page_grids_file)
from pdftabextract.extract import fit_texts_into_grid, datatable_to_dataframe
datatable = fit_texts_into_grid(p['texts'], grid)
df = datatable_to_dataframe(datatable)
df.head(n=10)
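# Illustrative cleanup sketch (an addition; the helper below is hypothetical):
# with OCR'd tables like this, zeros often come through as 'O'/'o' with stray
# spaces, so population-like cells can be normalized before validation.
import re
def clean_number(val):
    digits = re.sub(r'[Oo]', '0', str(val)).replace(' ', '')
    return int(digits) if digits.isdigit() else None
# e.g.: df[df.columns[0]] = df[df.columns[0]].apply(clean_number)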
csv_output_file = os.path.join(OUTPUTPATH, output_files_basename + '-p3_only.csv')
print("saving extracted data to '%s'" % csv_output_file)
df.to_csv(csv_output_file, index=False)
excel_output_file = os.path.join(OUTPUTPATH, output_files_basename + '-p3_only.xlsx')
print("saving extracted data to '%s'" % excel_output_file)
df.to_excel(excel_output_file, index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Principal Components Analysis
Step2: Recursive Feature Elimination
Step3: Splitting the data in a train and a test subset
Step4: Test the result of training the model
Step5: Cross validation & feature selection
|
<ASSISTANT_TASK:>
Python Code:
#Final iteration tried across different cuts. Accuracy >55%
keywords = [ 'nice', 'pleased', 'better',
'like', 'easy', 'excellent',
'love','impressed',
'satisfied','pretty',
'best','works great']
for key in keywords:
# Note that we add spaces around the key so that we're getting the word,
# not just pattern matching.
amazon[str(key)] = amazon.Sentence.str.contains(
' '+str(key)+' ',
case=False
)
#Use the whole dataset
amazon['allcaps'] = amazon.Sentence.str.isupper()
amazon['Sentiment'] = (amazon['Sentiment'] == 1)
plt.figure(figsize=(20, 7))
sns.heatmap(amazon.corr())
# Define data & target for the model
data = amazon[keywords + ['allcaps']]
target = amazon['Sentiment']
# Instantiate our model and store it in a new variable.
bnb = BernoulliNB()
# Fit our model to the data.
bnb.fit(data, target)
# Classify, storing the result in a new variable.
y_pred = bnb.predict(data)
# Display our results.
print("Number of mislabeled points (w/o training) out of a total {} points : {}".format(
data.shape[0],
(target != y_pred).sum()
))
#Calculate confusion matrix
print(confusion_matrix(target, y_pred))
# from confusion matrix calculate accuracy
cm = confusion_matrix(target, y_pred)
#Accuracy
total = sum(sum(cm))
accuracy = (cm[0,0] + cm[1,1]) / total
print('Accuracy : ', accuracy)
#Specificity
Specificity = cm[0,0] / (cm[0,0] + cm[0,1])
print('Specificity : ', Specificity)
#Sensitivity
Sensitivity = cm[1,1] / (cm[1,0] + cm[1,1])
print('Sensitivity : ', Sensitivity)
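# Small reusable helper (an addition): the same confusion-matrix metrics,
# packaged so the train/test blocks below could reuse it.
def cm_metrics(cm):
    total = cm.sum()
    return {'accuracy': (cm[0, 0] + cm[1, 1]) / total,
            'specificity': cm[0, 0] / (cm[0, 0] + cm[0, 1]),
            'sensitivity': cm[1, 1] / (cm[1, 0] + cm[1, 1])}
print(cm_metrics(cm))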
#Prepare data for PCA
X_std = StandardScaler().fit_transform(data)
mean_vec = np.mean(X_std, axis=0)
cov_mat = (X_std - mean_vec).T.dot((X_std - mean_vec)) / (X_std.shape[0]-1)
#Eigenvectores & Eigenvalues
cov_mat = np.cov(X_std.T)
eig_vals, eig_vecs = np.linalg.eig(cov_mat)
# Inspecting the eigenvalues and eigenvectors.
for i in range(len(eig_vals)):
eigvecs = eig_vecs[:, i].reshape(1, len(keywords)+1).T
print('Eigenvector {}: \n{}'.format(i + 1, eigvecs))
print('Eigenvalue {}: {}'.format(i + 1, eig_vals[i]))
print(40 * '-')
sklearn_pca = PCA(n_components=len(keywords))
Y_sklearn = sklearn_pca.fit_transform(X_std)
print(
'The percentage of total variance in the dataset explained by each',
'component from Sklearn PCA.\n',
sklearn_pca.explained_variance_ratio_
)
# create the RFE model and select attributes
rfe = RFE(bnb, len(keywords))
fit = rfe.fit(data,target)
# summarize the selection of the attributes
result_RFE = pd.DataFrame(list(zip(data.head(0), rfe.ranking_, rfe.support_)),columns=['Features','Ranking','Support'] )
result_RFE.sort_values('Ranking')
#Result as a list for "keywords"
result= result_RFE[result_RFE.Support]
my_list = result["Features"].tolist()
my_list
for key in keywords:
# Note that we add spaces around the key so that we're getting the word,
# not just pattern matching.
train[str(key)] = train.Sentence.str.contains(
' '+str(key)+' ',
case=False
)
#Using the train subset
train['allcaps'] = train.Sentence.str.isupper()
train['Sentiment'] = (train['Sentiment'] == 1)
#Plot correlation matrix
plt.figure(figsize=(20, 7))
sns.heatmap(train.corr())
# Define training data & target for the model
data1 = train[keywords + ['allcaps']]
target1 = train['Sentiment']
# Instantiate our model and store it in a new variable.
bnb2 = BernoulliNB()
# Fit our model to the data.
bnb2.fit(data1, target1)
# Classify, storing the result in a new variable.
y_pred1 = bnb2.predict(data1)
# Display our results.
print("Number of mislabeled points in TRAIN out of a total {} points : {}".format(
data1.shape[0],
(target1 != y_pred1).sum()
))
confusion_matrix(target1, y_pred1)
# from confusion matrix calculate accuracy
cm1 = confusion_matrix(target1, y_pred1)
total1=sum(sum(cm1))
accuracy1=(cm1[0,0]+cm1[1,1])/total1
print ('Accuracy : ', accuracy1)
Specificity1 = cm1[0,0]/(cm1[0,0]+cm1[0,1])
print('Specificity : ', Specificity1)
Sensitivity1 = cm1[1,1]/(cm1[1,0]+cm1[1,1])
print('Sensitivity : ', Sensitivity1)
for key in keywords:
# Note that we add spaces around the key so that we're getting the word,
# not just pattern matching.
test[str(key)] = test.Sentence.str.contains(
' '+str(key)+' ',
case=False
)
test['allcaps'] = test.Sentence.str.isupper()
test['Sentiment'] = (test['Sentiment'] == 1)
#Define the data & target with test subset
data2 = test[keywords +['allcaps']]
target2 = test['Sentiment']
# Instantiate our model and store it in a new variable.
bnb2 = BernoulliNB()
# Fit our model to the data.
bnb2.fit(data2, target2)
# Classify, storing the result in a new variable.
y_pred2 = bnb2.predict(data2)
# Display our results.
print("Number of mislabeled points in TEST out of a total {} points : {}".format(
data2.shape[0],
(target2 != y_pred2).sum()
))
confusion_matrix(target2, y_pred2)
# from confusion matrix calculate accuracy
cm2 = confusion_matrix(target2, y_pred2)
total2=sum(sum(cm2))
accuracy2=(cm2[0,0]+cm2[1,1])/total2
print ('Accuracy : ', accuracy2)
Specificity2 = cm2[0,0]/(cm2[0,0]+cm2[0,1])
print('Specificity : ', Specificity2)
Sensitivity2 = cm2[1,1]/(cm2[1,0]+cm2[1,1])
print('Sensitivity : ', Sensitivity2)
#Test through cross validation the result of the best model using the held-out subsets
keywords2 = keywords
for key in keywords2:
# Note that we add spaces around the key so that we're getting the word,
# not just pattern matching.
train[str(key)] = train.Sentence.str.contains(
' '+str(key)+' ',
case=False
)
#Define data and target for cross validation
amazon['allcaps'] = amazon.Sentence.str.isupper()
amazon['Sentiment'] = (amazon['Sentiment'] == 1)
datacv = amazon[keywords2]
targetcv = amazon['Sentiment']
# Instantiate our model and store it in a new variable.
bnbcv = BernoulliNB()
# Fit our model to the data.
bnbcv.fit(datacv, targetcv)
# Classify, storing the result in a new variable.
y_predcv = bnbcv.predict(datacv)
# Display our results.
print("Number of mislabeled points out of a total {} points : {}".format(
datacv.shape[0],
(targetcv != y_predcv).sum()
))
#Show Confusion Matrix
confusion_matrix(targetcv, y_predcv)
#Cross validation, scores
skf = StratifiedKFold(n_splits=10, random_state=0)
scores = cross_val_score(bnb, datacv, targetcv, cv=skf)
#Test the prediction capacity of the model (labels must be the true targets, not the in-sample predictions)
predicted = cross_val_predict(bnb, datacv, targetcv, cv=skf)
prediction = metrics.accuracy_score(targetcv, predicted)
#Print scores, accuracy of the model and prediction
print(scores)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
print("Prediction: %0.2f " % (prediction))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A couple of utility functions to plot grayscale 28x28 images
Step2: PCA with a linear Autoencoder
Step3: Normalize the data
Step4: Now let's build the Autoencoder...
Step5: Stacked Autoencoders
Step6: Train all layers at once
Step7: Now let's train it! Note that we don't feed target values (y_batch is not used). This is unsupervised training.
Step8: This function loads the model, evaluates it on the test set (it measures the reconstruction error), then it displays the original image and its reconstruction
Step9: Tying weights
Step10: Training one Autoencoder at a time in multiple graphs
Step11: Now let's train two Autoencoders. The first one is trained on the training data, and the second is trained on the previous Autoencoder's hidden layer output
Step12: Finally, we can create a Stacked Autoencoder by simply reusing the weights and biases from the Autoencoders we just trained
Step13: Training one Autoencoder at a time in a single graph
Step14: Cache the frozen layer outputs
Step15: Visualizing the Reconstructions
Step16: Visualizing the extracted features
Step17: Unsupervised pretraining
Step18: Regular training (without pretraining)
Step19: Now reusing the first two layers of the autoencoder we pretrained
Step20: Stacked denoising Autoencoder
Step21: Using dropout
Step22: Sparse Autoencoder
Step23: Note that the coding layer must output values from 0 to 1, which is why we use the sigmoid activation function
Step24: To speed up training, you can normalize the inputs between 0 and 1, and use the cross entropy instead of the MSE for the cost function
Step25: Variational Autoencoder
Step26: Generate digits
Step27: Note that the latent loss is computed differently in this second variant
Step28: Encode & Decode
Step29: Decode
Step30: Let's plot the reconstructions
Step31: Interpolate digits
|
<ASSISTANT_TASK:>
Python Code:
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import numpy.random as rnd  # used below for data generation and shuffling
import os
import sys
# to make this notebook's output stable across runs
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# To plot pretty figures
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "autoencoders"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
def plot_image(image, shape=[28, 28]):
plt.imshow(image.reshape(shape), cmap="Greys", interpolation="nearest")
plt.axis("off")
def plot_multiple_images(images, n_rows, n_cols, pad=2):
images = images - images.min() # make the minimum == 0, so the padding looks white
w,h = images.shape[1:]
image = np.zeros(((w+pad)*n_rows+pad, (h+pad)*n_cols+pad))
for y in range(n_rows):
for x in range(n_cols):
image[(y*(h+pad)+pad):(y*(h+pad)+pad+h),(x*(w+pad)+pad):(x*(w+pad)+pad+w)] = images[y*n_cols+x]
plt.imshow(image, cmap="Greys", interpolation="nearest")
plt.axis("off")
rnd.seed(4)
m = 200
w1, w2 = 0.1, 0.3
noise = 0.1
angles = rnd.rand(m) * 3 * np.pi / 2 - 0.5
data = np.empty((m, 3))
data[:, 0] = np.cos(angles) + np.sin(angles)/2 + noise * rnd.randn(m) / 2
data[:, 1] = np.sin(angles) * 0.7 + noise * rnd.randn(m) / 2
data[:, 2] = data[:, 0] * w1 + data[:, 1] * w2 + noise * rnd.randn(m)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(data[:100])
X_test = scaler.transform(data[100:])
import tensorflow as tf
reset_graph()
n_inputs = 3
n_hidden = 2 # codings
n_outputs = n_inputs
learning_rate = 0.01
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden)
outputs = tf.layers.dense(hidden, n_outputs)
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X))
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(reconstruction_loss)
init = tf.global_variables_initializer()
n_iterations = 1000
codings = hidden
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
training_op.run(feed_dict={X: X_train})
codings_val = codings.eval(feed_dict={X: X_test})
fig = plt.figure(figsize=(4,3))
plt.plot(codings_val[:,0], codings_val[:, 1], "b.")
plt.xlabel("$z_1$", fontsize=18)
plt.ylabel("$z_2$", fontsize=18, rotation=0)
save_fig("linear_autoencoder_pca_plot")
plt.show()
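# Optional sketch (not in the original cells): a linear autoencoder trained with MSE
# spans the same subspace as PCA, so sklearn's projection of the same test set should
# match the codings above up to rotation/reflection and scaling.
from sklearn.decomposition import PCA as SklearnPCA
pca2d = SklearnPCA(n_components=2)
codings_pca = pca2d.fit_transform(X_test)
fig = plt.figure(figsize=(4, 3))
plt.plot(codings_pca[:, 0], codings_pca[:, 1], "r.")
plt.xlabel("$pc_1$", fontsize=18)
plt.ylabel("$pc_2$", fontsize=18, rotation=0)
plt.show()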
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/")
reset_graph()
from functools import partial
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150 # codings
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
l2_reg = 0.0001
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
he_init = tf.contrib.layers.variance_scaling_initializer() # He initialization
#Equivalent to:
#he_init = lambda shape, dtype=tf.float32: tf.truncated_normal(shape, 0., stddev=np.sqrt(2/shape[0]))
l2_regularizer = tf.contrib.layers.l2_regularizer(l2_reg)
my_dense_layer = partial(tf.layers.dense,
activation=tf.nn.elu,
kernel_initializer=he_init,
kernel_regularizer=l2_regularizer)
hidden1 = my_dense_layer(X, n_hidden1)
hidden2 = my_dense_layer(hidden1, n_hidden2)
hidden3 = my_dense_layer(hidden2, n_hidden3)
outputs = my_dense_layer(hidden3, n_outputs, activation=None)
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X))
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss = tf.add_n([reconstruction_loss] + reg_losses)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver() # not shown in the book
n_epochs = 5
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = mnist.train.num_examples // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="") # not shown in the book
sys.stdout.flush() # not shown
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch})
loss_train = reconstruction_loss.eval(feed_dict={X: X_batch}) # not shown
print("\r{}".format(epoch), "Train MSE:", loss_train) # not shown
saver.save(sess, "./my_model_all_layers.ckpt") # not shown
def show_reconstructed_digits(X, outputs, model_path = None, n_test_digits = 2):
with tf.Session() as sess:
if model_path:
saver.restore(sess, model_path)
X_test = mnist.test.images[:n_test_digits]
outputs_val = outputs.eval(feed_dict={X: X_test})
fig = plt.figure(figsize=(8, 3 * n_test_digits))
for digit_index in range(n_test_digits):
plt.subplot(n_test_digits, 2, digit_index * 2 + 1)
plot_image(X_test[digit_index])
plt.subplot(n_test_digits, 2, digit_index * 2 + 2)
plot_image(outputs_val[digit_index])
show_reconstructed_digits(X, outputs, "./my_model_all_layers.ckpt")
save_fig("reconstruction_plot")
reset_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150 # codings
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
l2_reg = 0.0005
activation = tf.nn.elu
regularizer = tf.contrib.layers.l2_regularizer(l2_reg)
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
weights1_init = initializer([n_inputs, n_hidden1])
weights2_init = initializer([n_hidden1, n_hidden2])
weights1 = tf.Variable(weights1_init, dtype=tf.float32, name="weights1")
weights2 = tf.Variable(weights2_init, dtype=tf.float32, name="weights2")
weights3 = tf.transpose(weights2, name="weights3") # tied weights
weights4 = tf.transpose(weights1, name="weights4") # tied weights
biases1 = tf.Variable(tf.zeros(n_hidden1), name="biases1")
biases2 = tf.Variable(tf.zeros(n_hidden2), name="biases2")
biases3 = tf.Variable(tf.zeros(n_hidden3), name="biases3")
biases4 = tf.Variable(tf.zeros(n_outputs), name="biases4")
hidden1 = activation(tf.matmul(X, weights1) + biases1)
hidden2 = activation(tf.matmul(hidden1, weights2) + biases2)
hidden3 = activation(tf.matmul(hidden2, weights3) + biases3)
outputs = tf.matmul(hidden3, weights4) + biases4
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X))
reg_loss = regularizer(weights1) + regularizer(weights2)
loss = reconstruction_loss + reg_loss
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 5
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = mnist.train.num_examples // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch})
loss_train = reconstruction_loss.eval(feed_dict={X: X_batch})
print("\r{}".format(epoch), "Train MSE:", loss_train)
saver.save(sess, "./my_model_tying_weights.ckpt")
show_reconstructed_digits(X, outputs, "./my_model_tying_weights.ckpt")
reset_graph()
from functools import partial
def train_autoencoder(X_train, n_neurons, n_epochs, batch_size,
learning_rate = 0.01, l2_reg = 0.0005,
activation=tf.nn.elu, seed=42):
graph = tf.Graph()
with graph.as_default():
tf.set_random_seed(seed)
n_inputs = X_train.shape[1]
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
my_dense_layer = partial(
tf.layers.dense,
activation=activation,
kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),
kernel_regularizer=tf.contrib.layers.l2_regularizer(l2_reg))
hidden = my_dense_layer(X, n_neurons, name="hidden")
outputs = my_dense_layer(hidden, n_inputs, activation=None, name="outputs")
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X))
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss = tf.add_n([reconstruction_loss] + reg_losses)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
with tf.Session(graph=graph) as sess:
init.run()
for epoch in range(n_epochs):
n_batches = len(X_train) // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
indices = rnd.permutation(len(X_train))[:batch_size]
X_batch = X_train[indices]
sess.run(training_op, feed_dict={X: X_batch})
loss_train = reconstruction_loss.eval(feed_dict={X: X_batch})
print("\r{}".format(epoch), "Train MSE:", loss_train)
params = dict([(var.name, var.eval()) for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)])
hidden_val = hidden.eval(feed_dict={X: X_train})
return hidden_val, params["hidden/kernel:0"], params["hidden/bias:0"], params["outputs/kernel:0"], params["outputs/bias:0"]
hidden_output, W1, b1, W4, b4 = train_autoencoder(mnist.train.images, n_neurons=300, n_epochs=4, batch_size=150)
_, W2, b2, W3, b3 = train_autoencoder(hidden_output, n_neurons=150, n_epochs=4, batch_size=150)
reset_graph()
n_inputs = 28*28
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden1 = tf.nn.elu(tf.matmul(X, W1) + b1)
hidden2 = tf.nn.elu(tf.matmul(hidden1, W2) + b2)
hidden3 = tf.nn.elu(tf.matmul(hidden2, W3) + b3)
outputs = tf.matmul(hidden3, W4) + b4
show_reconstructed_digits(X, outputs)
reset_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150 # codings
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
l2_reg = 0.0001
activation = tf.nn.elu
regularizer = tf.contrib.layers.l2_regularizer(l2_reg)
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
weights1_init = initializer([n_inputs, n_hidden1])
weights2_init = initializer([n_hidden1, n_hidden2])
weights3_init = initializer([n_hidden2, n_hidden3])
weights4_init = initializer([n_hidden3, n_outputs])
weights1 = tf.Variable(weights1_init, dtype=tf.float32, name="weights1")
weights2 = tf.Variable(weights2_init, dtype=tf.float32, name="weights2")
weights3 = tf.Variable(weights3_init, dtype=tf.float32, name="weights3")
weights4 = tf.Variable(weights4_init, dtype=tf.float32, name="weights4")
biases1 = tf.Variable(tf.zeros(n_hidden1), name="biases1")
biases2 = tf.Variable(tf.zeros(n_hidden2), name="biases2")
biases3 = tf.Variable(tf.zeros(n_hidden3), name="biases3")
biases4 = tf.Variable(tf.zeros(n_outputs), name="biases4")
hidden1 = activation(tf.matmul(X, weights1) + biases1)
hidden2 = activation(tf.matmul(hidden1, weights2) + biases2)
hidden3 = activation(tf.matmul(hidden2, weights3) + biases3)
outputs = tf.matmul(hidden3, weights4) + biases4
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X))
optimizer = tf.train.AdamOptimizer(learning_rate)
with tf.name_scope("phase1"):
phase1_outputs = tf.matmul(hidden1, weights4) + biases4 # bypass hidden2 and hidden3
phase1_reconstruction_loss = tf.reduce_mean(tf.square(phase1_outputs - X))
phase1_reg_loss = regularizer(weights1) + regularizer(weights4)
phase1_loss = phase1_reconstruction_loss + phase1_reg_loss
phase1_training_op = optimizer.minimize(phase1_loss)
with tf.name_scope("phase2"):
phase2_reconstruction_loss = tf.reduce_mean(tf.square(hidden3 - hidden1))
phase2_reg_loss = regularizer(weights2) + regularizer(weights3)
phase2_loss = phase2_reconstruction_loss + phase2_reg_loss
train_vars = [weights2, biases2, weights3, biases3]
phase2_training_op = optimizer.minimize(phase2_loss, var_list=train_vars) # freeze hidden1
init = tf.global_variables_initializer()
saver = tf.train.Saver()
training_ops = [phase1_training_op, phase2_training_op]
reconstruction_losses = [phase1_reconstruction_loss, phase2_reconstruction_loss]
n_epochs = [4, 4]
batch_sizes = [150, 150]
with tf.Session() as sess:
init.run()
for phase in range(2):
print("Training phase #{}".format(phase + 1))
for epoch in range(n_epochs[phase]):
n_batches = mnist.train.num_examples // batch_sizes[phase]
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
X_batch, y_batch = mnist.train.next_batch(batch_sizes[phase])
sess.run(training_ops[phase], feed_dict={X: X_batch})
loss_train = reconstruction_losses[phase].eval(feed_dict={X: X_batch})
print("\r{}".format(epoch), "Train MSE:", loss_train)
saver.save(sess, "./my_model_one_at_a_time.ckpt")
loss_test = reconstruction_loss.eval(feed_dict={X: mnist.test.images})
print("Test MSE:", loss_test)
training_ops = [phase1_training_op, phase2_training_op]
reconstruction_losses = [phase1_reconstruction_loss, phase2_reconstruction_loss]
n_epochs = [4, 4]
batch_sizes = [150, 150]
with tf.Session() as sess:
init.run()
for phase in range(2):
print("Training phase #{}".format(phase + 1))
if phase == 1:
hidden1_cache = hidden1.eval(feed_dict={X: mnist.train.images})
for epoch in range(n_epochs[phase]):
n_batches = mnist.train.num_examples // batch_sizes[phase]
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
if phase == 1:
indices = rnd.permutation(mnist.train.num_examples)
hidden1_batch = hidden1_cache[indices[:batch_sizes[phase]]]
feed_dict = {hidden1: hidden1_batch}
sess.run(training_ops[phase], feed_dict=feed_dict)
else:
X_batch, y_batch = mnist.train.next_batch(batch_sizes[phase])
feed_dict = {X: X_batch}
sess.run(training_ops[phase], feed_dict=feed_dict)
loss_train = reconstruction_losses[phase].eval(feed_dict=feed_dict)
print("\r{}".format(epoch), "Train MSE:", loss_train)
saver.save(sess, "./my_model_cache_frozen.ckpt")
loss_test = reconstruction_loss.eval(feed_dict={X: mnist.test.images})
print("Test MSE:", loss_test)
n_test_digits = 2
X_test = mnist.test.images[:n_test_digits]
with tf.Session() as sess:
saver.restore(sess, "./my_model_one_at_a_time.ckpt") # not shown in the book
outputs_val = outputs.eval(feed_dict={X: X_test})
def plot_image(image, shape=[28, 28]):
plt.imshow(image.reshape(shape), cmap="Greys", interpolation="nearest")
plt.axis("off")
for digit_index in range(n_test_digits):
plt.subplot(n_test_digits, 2, digit_index * 2 + 1)
plot_image(X_test[digit_index])
plt.subplot(n_test_digits, 2, digit_index * 2 + 2)
plot_image(outputs_val[digit_index])
with tf.Session() as sess:
saver.restore(sess, "./my_model_one_at_a_time.ckpt") # not shown in the book
weights1_val = weights1.eval()
for i in range(5):
plt.subplot(1, 5, i + 1)
plot_image(weights1_val.T[i])
save_fig("extracted_features_plot") # not shown
plt.show() # not shown
reset_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150
n_outputs = 10
learning_rate = 0.01
l2_reg = 0.0005
activation = tf.nn.elu
regularizer = tf.contrib.layers.l2_regularizer(l2_reg)
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.int32, shape=[None])
weights1_init = initializer([n_inputs, n_hidden1])
weights2_init = initializer([n_hidden1, n_hidden2])
weights3_init = initializer([n_hidden2, n_outputs])
weights1 = tf.Variable(weights1_init, dtype=tf.float32, name="weights1")
weights2 = tf.Variable(weights2_init, dtype=tf.float32, name="weights2")
weights3 = tf.Variable(weights3_init, dtype=tf.float32, name="weights3")
biases1 = tf.Variable(tf.zeros(n_hidden1), name="biases1")
biases2 = tf.Variable(tf.zeros(n_hidden2), name="biases2")
biases3 = tf.Variable(tf.zeros(n_outputs), name="biases3")
hidden1 = activation(tf.matmul(X, weights1) + biases1)
hidden2 = activation(tf.matmul(hidden1, weights2) + biases2)
logits = tf.matmul(hidden2, weights3) + biases3
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
reg_loss = regularizer(weights1) + regularizer(weights2) + regularizer(weights3)
loss = cross_entropy + reg_loss
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
pretrain_saver = tf.train.Saver([weights1, weights2, biases1, biases2])
saver = tf.train.Saver()
n_epochs = 4
batch_size = 150
n_labeled_instances = 20000
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = n_labeled_instances // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
indices = rnd.permutation(n_labeled_instances)[:batch_size]
X_batch, y_batch = mnist.train.images[indices], mnist.train.labels[indices]
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
accuracy_val = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
print("\r{}".format(epoch), "Train accuracy:", accuracy_val, end=" ")
saver.save(sess, "./my_model_supervised.ckpt")
accuracy_val = accuracy.eval(feed_dict={X: mnist.test.images, y: mnist.test.labels})
print("Test accuracy:", accuracy_val)
n_epochs = 4
batch_size = 150
n_labeled_instances = 20000
#training_op = optimizer.minimize(loss, var_list=[weights3, biases3]) # Freeze layers 1 and 2 (optional)
with tf.Session() as sess:
init.run()
pretrain_saver.restore(sess, "./my_model_cache_frozen.ckpt")
for epoch in range(n_epochs):
n_batches = n_labeled_instances // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
indices = rnd.permutation(n_labeled_instances)[:batch_size]
X_batch, y_batch = mnist.train.images[indices], mnist.train.labels[indices]
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
accuracy_val = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
print("\r{}".format(epoch), "Train accuracy:", accuracy_val, end="\t")
saver.save(sess, "./my_model_supervised_pretrained.ckpt")
accuracy_val = accuracy.eval(feed_dict={X: mnist.test.images, y: mnist.test.labels})
print("Test accuracy:", accuracy_val)
reset_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150 # codings
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
noise_level = 1.0
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
X_noisy = X + noise_level * tf.random_normal(tf.shape(X))
hidden1 = tf.layers.dense(X_noisy, n_hidden1, activation=tf.nn.relu,
name="hidden1")
hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, # not shown in the book
name="hidden2") # not shown
hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, # not shown
name="hidden3") # not shown
outputs = tf.layers.dense(hidden3, n_outputs, name="outputs") # not shown
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X)) # MSE
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(reconstruction_loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 10
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = mnist.train.num_examples // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch})
loss_train = reconstruction_loss.eval(feed_dict={X: X_batch})
print("\r{}".format(epoch), "Train MSE:", loss_train)
saver.save(sess, "./my_model_stacked_denoising_gaussian.ckpt")
reset_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150 # codings
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
dropout_rate = 0.3
training = tf.placeholder_with_default(False, shape=(), name='training')
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
X_drop = tf.layers.dropout(X, dropout_rate, training=training)
hidden1 = tf.layers.dense(X_drop, n_hidden1, activation=tf.nn.relu,
name="hidden1")
hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, # not shown in the book
name="hidden2") # not shown
hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, # not shown
name="hidden3") # not shown
outputs = tf.layers.dense(hidden3, n_outputs, name="outputs") # not shown
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X)) # MSE
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(reconstruction_loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 10
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = mnist.train.num_examples // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch, training: True})
loss_train = reconstruction_loss.eval(feed_dict={X: X_batch})
print("\r{}".format(epoch), "Train MSE:", loss_train)
saver.save(sess, "./my_model_stacked_denoising_dropout.ckpt")
show_reconstructed_digits(X, outputs, "./my_model_stacked_denoising_dropout.ckpt")
p = 0.1
q = np.linspace(0.001, 0.999, 500)
kl_div = p * np.log(p / q) + (1 - p) * np.log((1 - p) / (1 - q))
mse = (p - q)**2
plt.plot([p, p], [0, 0.3], "k:")
plt.text(0.05, 0.32, "Target\nsparsity", fontsize=14)
plt.plot(q, kl_div, "b-", label="KL divergence")
plt.plot(q, mse, "r--", label="MSE")
plt.legend(loc="upper left")
plt.xlabel("Actual sparsity")
plt.ylabel("Cost", rotation=0)
plt.axis([0, 1, 0, 0.95])
save_fig("sparsity_loss_plot")
reset_graph()
n_inputs = 28 * 28
n_hidden1 = 1000 # sparse codings
n_outputs = n_inputs
def kl_divergence(p, q):
# Kullback Leibler divergence
return p * tf.log(p / q) + (1 - p) * tf.log((1 - p) / (1 - q))
learning_rate = 0.01
sparsity_target = 0.1
sparsity_weight = 0.2
X = tf.placeholder(tf.float32, shape=[None, n_inputs]) # not shown in the book
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.sigmoid) # not shown
outputs = tf.layers.dense(hidden1, n_outputs) # not shown
hidden1_mean = tf.reduce_mean(hidden1, axis=0) # batch mean
sparsity_loss = tf.reduce_sum(kl_divergence(sparsity_target, hidden1_mean))
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X)) # MSE
loss = reconstruction_loss + sparsity_weight * sparsity_loss
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 100
batch_size = 1000
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = mnist.train.num_examples // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch})
reconstruction_loss_val, sparsity_loss_val, loss_val = sess.run([reconstruction_loss, sparsity_loss, loss], feed_dict={X: X_batch})
print("\r{}".format(epoch), "Train MSE:", reconstruction_loss_val, "\tSparsity loss:", sparsity_loss_val, "\tTotal loss:", loss_val)
saver.save(sess, "./my_model_sparse.ckpt")
show_reconstructed_digits(X, outputs, "./my_model_sparse.ckpt")
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.sigmoid)
logits = tf.layers.dense(hidden1, n_outputs)
outputs = tf.nn.sigmoid(logits)
xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=X, logits=logits)
reconstruction_loss = tf.reduce_mean(xentropy)
reset_graph()
from functools import partial
n_inputs = 28 * 28
n_hidden1 = 500
n_hidden2 = 500
n_hidden3 = 20 # codings
n_hidden4 = n_hidden2
n_hidden5 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.001
initializer = tf.contrib.layers.variance_scaling_initializer()
my_dense_layer = partial(
tf.layers.dense,
activation=tf.nn.elu,
kernel_initializer=initializer)
X = tf.placeholder(tf.float32, [None, n_inputs])
hidden1 = my_dense_layer(X, n_hidden1)
hidden2 = my_dense_layer(hidden1, n_hidden2)
hidden3_mean = my_dense_layer(hidden2, n_hidden3, activation=None)
hidden3_sigma = my_dense_layer(hidden2, n_hidden3, activation=None)
noise = tf.random_normal(tf.shape(hidden3_sigma), dtype=tf.float32)
hidden3 = hidden3_mean + hidden3_sigma * noise
hidden4 = my_dense_layer(hidden3, n_hidden4)
hidden5 = my_dense_layer(hidden4, n_hidden5)
logits = my_dense_layer(hidden5, n_outputs, activation=None)
outputs = tf.sigmoid(logits)
xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=X, logits=logits)
reconstruction_loss = tf.reduce_sum(xentropy)
eps = 1e-10 # smoothing term to avoid computing log(0), which is -inf
latent_loss = 0.5 * tf.reduce_sum(
tf.square(hidden3_sigma) + tf.square(hidden3_mean)
- 1 - tf.log(eps + tf.square(hidden3_sigma)))
loss = reconstruction_loss + latent_loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 50
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = mnist.train.num_examples // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch})
loss_val, reconstruction_loss_val, latent_loss_val = sess.run([loss, reconstruction_loss, latent_loss], feed_dict={X: X_batch})
print("\r{}".format(epoch), "Train total loss:", loss_val, "\tReconstruction loss:", reconstruction_loss_val, "\tLatent loss:", latent_loss_val)
saver.save(sess, "./my_model_variational.ckpt")
reset_graph()
from functools import partial
n_inputs = 28 * 28
n_hidden1 = 500
n_hidden2 = 500
n_hidden3 = 20 # codings
n_hidden4 = n_hidden2
n_hidden5 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.001
initializer = tf.contrib.layers.variance_scaling_initializer()
my_dense_layer = partial(
tf.layers.dense,
activation=tf.nn.elu,
kernel_initializer=initializer)
X = tf.placeholder(tf.float32, [None, n_inputs])
hidden1 = my_dense_layer(X, n_hidden1)
hidden2 = my_dense_layer(hidden1, n_hidden2)
hidden3_mean = my_dense_layer(hidden2, n_hidden3, activation=None)
hidden3_gamma = my_dense_layer(hidden2, n_hidden3, activation=None)
noise = tf.random_normal(tf.shape(hidden3_gamma), dtype=tf.float32)
hidden3 = hidden3_mean + tf.exp(0.5 * hidden3_gamma) * noise
hidden4 = my_dense_layer(hidden3, n_hidden4)
hidden5 = my_dense_layer(hidden4, n_hidden5)
logits = my_dense_layer(hidden5, n_outputs, activation=None)
outputs = tf.sigmoid(logits)
xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=X, logits=logits)
reconstruction_loss = tf.reduce_sum(xentropy)
latent_loss = 0.5 * tf.reduce_sum(
tf.exp(hidden3_gamma) + tf.square(hidden3_mean) - 1 - hidden3_gamma)
loss = reconstruction_loss + latent_loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
import numpy as np
n_digits = 60
n_epochs = 50
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = mnist.train.num_examples // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="") # not shown in the book
sys.stdout.flush() # not shown
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch})
loss_val, reconstruction_loss_val, latent_loss_val = sess.run([loss, reconstruction_loss, latent_loss], feed_dict={X: X_batch}) # not shown
print("\r{}".format(epoch), "Train total loss:", loss_val, "\tReconstruction loss:", reconstruction_loss_val, "\tLatent loss:", latent_loss_val) # not shown
saver.save(sess, "./my_model_variational.ckpt") # not shown
codings_rnd = np.random.normal(size=[n_digits, n_hidden3])
outputs_val = outputs.eval(feed_dict={hidden3: codings_rnd})
plt.figure(figsize=(8,50)) # not shown in the book
for iteration in range(n_digits):
plt.subplot(n_digits, 10, iteration + 1)
plot_image(outputs_val[iteration])
n_rows = 6
n_cols = 10
plot_multiple_images(outputs_val.reshape(-1, 28, 28), n_rows, n_cols)
save_fig("generated_digits_plot")
plt.show()
latent_loss = 0.5 * tf.reduce_sum(
tf.exp(hidden3_gamma) + tf.square(hidden3_mean) - 1 - hidden3_gamma)
n_digits = 3
X_test, y_test = mnist.test.next_batch(batch_size)
codings = hidden3
with tf.Session() as sess:
saver.restore(sess, "./my_model_variational.ckpt")
codings_val = codings.eval(feed_dict={X: X_test})
with tf.Session() as sess:
saver.restore(sess, "./my_model_variational.ckpt")
outputs_val = outputs.eval(feed_dict={codings: codings_val})
fig = plt.figure(figsize=(8, 2.5 * n_digits))
for iteration in range(n_digits):
plt.subplot(n_digits, 2, 1 + 2 * iteration)
plot_image(X_test[iteration])
plt.subplot(n_digits, 2, 2 + 2 * iteration)
plot_image(outputs_val[iteration])
n_iterations = 3
n_digits = 6
codings_rnd = np.random.normal(size=[n_digits, n_hidden3])
with tf.Session() as sess:
saver.restore(sess, "./my_model_variational.ckpt")
target_codings = np.roll(codings_rnd, -1, axis=0)
for iteration in range(n_iterations + 1):
codings_interpolate = codings_rnd + (target_codings - codings_rnd) * iteration / n_iterations
outputs_val = outputs.eval(feed_dict={codings: codings_interpolate})
plt.figure(figsize=(11, 1.5*n_iterations))
for digit_index in range(n_digits):
plt.subplot(1, n_digits, digit_index + 1)
plot_image(outputs_val[digit_index])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: After a period of near steady state, we have a very large change. Assume the change is past the limit of the aircraft's flight envelope. Nonetheless the Kalman filter incorporates that new measurement into the filter based on the current Kalman gain. It cannot reject the noise because the measurement could reflect the initiation of a turn. Granted it is unlikely that we are turning so abruptly, but it is impossible to say whether the aircraft really turned or the measurement is merely noise until we see subsequent measurements
Step3: Given these future measurements we can infer that yes, the aircraft initiated a turn.
Step4: In this case we are led to conclude that the aircraft did not turn and that the outlying measurement was merely very noisy.
Step5: I've injected a lot of noise into the signal to allow you to visually distinguish the RTS output from the ideal output. In the graph above we can see that the Kalman filter, drawn as the green dotted line, is reasonably smooth compared to the input, but it still wanders away from the ideal line when several measurements in a row are biased towards one side of the line. In contrast, the RTS output is both extremely smooth and very close to the ideal output.
Step6: However, we must understand that this smoothing is predicated on the system model. We have told the filter that what we are tracking follows a constant velocity model with very low process error. When the filter looks ahead it sees that the future behavior closely matches a constant velocity so it is able to reject most of the noise in the signal. Suppose instead our system has a lot of process noise. For example, if we are tracking a light aircraft in gusty winds its velocity will change often, and the filter will be less able to distinguish between noise and erratic movement due to the wind. We can see this in the next graph.
Step7: This underscores the fact that these filters are not smoothing the data in the colloquial sense of the term. The filter is making an optimal estimate based on previous measurements, future measurements, and what you tell it about the behavior of the system and the noise in the system and measurements.
Step8: The improvement in the velocity, which is a hidden variable, is even more dramatic.
Step9: At step $k$ we can estimate $x_k$ using the normal Kalman filter equations. However, we can make a better estimate for $x_{k-1}$ by using the measurement received for $x_k$. Likewise, we can make a better estimate for $x_{k-2}$ by using the measurements received for $x_{k-1}$ and $x_{k}$. We can extend this computation back for an arbitrary $N$ steps.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
%matplotlib inline
#format the book
import book_format
book_format.set_style()
import matplotlib.pyplot as plt
data = [10.1, 10.2, 9.8, 10.1, 10.2, 10.3,
10.1, 9.9, 10.2, 10.0, 9.9, 11.4]
plt.plot(data)
plt.xlabel('time')
plt.ylabel('position');
data2 = [11.3, 12.1, 13.3, 13.9, 14.5, 15.2]
plt.plot(data + data2);
data3 = [9.8, 10.2, 9.9, 10.1, 10.0, 10.3, 9.9, 10.1]
plt.plot(data + data3);
import numpy as np
from numpy import random
from numpy.random import randn
import matplotlib.pyplot as plt
from filterpy.kalman import KalmanFilter
import kf_book.book_plots as bp
def plot_rts(noise, Q=0.001, show_velocity=False):
random.seed(123)
fk = KalmanFilter(dim_x=2, dim_z=1)
fk.x = np.array([0., 1.]) # state (x and dx)
fk.F = np.array([[1., 1.],
[0., 1.]]) # state transition matrix
fk.H = np.array([[1., 0.]]) # Measurement function
fk.P = 10. # covariance matrix
fk.R = noise # state uncertainty
fk.Q = Q # process uncertainty
# create noisy data
zs = np.asarray([t + randn()*noise for t in range (40)])
    # filter data with Kalman filter, then run smoother on it
mu, cov, _, _ = fk.batch_filter(zs)
M, P, C, _ = fk.rts_smoother(mu, cov)
# plot data
    if show_velocity:
        index = 1
    else:
        index = 0
if not show_velocity:
bp.plot_measurements(zs, lw=1)
plt.plot(M[:, index], c='b', label='RTS')
plt.plot(mu[:, index], c='g', ls='--', label='KF output')
if not show_velocity:
N = len(zs)
plt.plot([0, N], [0, N], 'k', lw=2, label='track')
plt.legend(loc=4)
plt.show()
plot_rts(7.)
plot_rts(noise=1.)
plot_rts(noise=7., Q=.1)
plot_rts(7.,show_velocity=True)
from kf_book.book_plots import figsize
from kf_book.smoothing_internal import *
with figsize(y=2):
show_fixed_lag_numberline()
from filterpy.kalman import FixedLagSmoother, KalmanFilter
import numpy.random as random
fls = FixedLagSmoother(dim_x=2, dim_z=1, N=8)
fls.x = np.array([0., .5])
fls.F = np.array([[1.,1.],
[0.,1.]])
fls.H = np.array([[1.,0.]])
fls.P *= 200
fls.R *= 5.
fls.Q *= 0.001
kf = KalmanFilter(dim_x=2, dim_z=1)
kf.x = np.array([0., .5])
kf.F = np.array([[1.,1.],
[0.,1.]])
kf.H = np.array([[1.,0.]])
kf.P *= 200
kf.R *= 5.
kf.Q *= 0.001
nom = np.array([t/2. for t in range (0, 40)])
zs = np.array([t + random.randn()*5.1 for t in nom])
for z in zs:
fls.smooth(z)
kf_x, _, _, _ = kf.batch_filter(zs)
x_smooth = np.array(fls.xSmooth)[:, 0]
fls_res = abs(x_smooth - nom)
kf_res = abs(kf_x[:, 0] - nom)
plt.plot(zs,'o', alpha=0.5, marker='o', label='zs')
plt.plot(x_smooth, label='FLS')
plt.plot(kf_x[:, 0], label='KF', ls='--')
plt.legend(loc=4)
print('mean residual fixed-lag: {:.3f}'.format(np.mean(fls_res)))
print('mean residual kalman: {:.3f}'.format(np.mean(kf_res)))
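# Optional sketch (not in the original book code): for reference, run the offline RTS
# smoother on the same measurements with a fresh filter; it uses *all* future data, so
# it should do at least as well as the fixed-lag smoother, at the cost of not being
# usable online. Assumes `zs` and `nom` from above.
kf2 = KalmanFilter(dim_x=2, dim_z=1)
kf2.x = np.array([0., .5])
kf2.F = np.array([[1., 1.],
                  [0., 1.]])
kf2.H = np.array([[1., 0.]])
kf2.P *= 200
kf2.R *= 5.
kf2.Q *= 0.001
mu2, cov2, _, _ = kf2.batch_filter(zs)
M2, P2, C2, _ = kf2.rts_smoother(mu2, cov2)
rts_res = abs(M2[:, 0] - nom)
print('mean residual RTS: {:.3f}'.format(np.mean(rts_res)))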
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: b) Load dataset
Step2: The 'Pregnant' column can only take on one of two (in this case) possibilities. Here 1 = pregnant, and 0 = not pregnant
Step3: We can see no features with significant correlation coefficients (i.e., $r$ values > 0.7)
Step4: 4. Evaluate Algorithms
Step5: b) Spot Check Algorithms
Step6: c) Select The Best Model
Step7: 5. Make predictions on validation dataset
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from pandas.tools.plotting import scatter_matrix
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn import metrics
# find path to your RetailMart.xlsx
dataset = pd.read_excel(open('C:/Users/craigrshenton/Desktop/Dropbox/excel_data_sci/ch06/RetailMart.xlsx','rb'), sheetname=0)
dataset = dataset.drop('Unnamed: 17', 1) # drop empty col
dataset.rename(columns={'PREGNANT':'Pregnant'}, inplace=True)
dataset.rename(columns={'Home/Apt/ PO Box':'Residency'}, inplace=True) # add simpler col name
dataset.columns = [x.strip().replace(' ', '_') for x in dataset.columns] # python does not like spaces in var names
# shape
print(dataset.shape)
# types
print(dataset.dtypes)
# head
dataset.head()
# feature distribution
print(dataset.groupby('Implied_Gender').size())
# target distribution
print(dataset.groupby('Pregnant').size())
# correlation
r = dataset.corr(method='pearson')
id_matrix = np.identity(r.shape[0]) # create identity matrix
r = r-id_matrix # remove same-feature correlations
np.where( r > 0.7 )
# dummify gender variable
dummy_gender = pd.get_dummies(dataset['Implied_Gender'], prefix='Gender')
print(dummy_gender.head())
# dummify residency variable
dummy_resident = pd.get_dummies(dataset['Residency'], prefix='Resident')
print(dummy_resident.head())
# Drop catagorical variables
dataset = dataset.drop('Implied_Gender', 1)
dataset = dataset.drop('Residency', 1)
# Add dummy variables
dataset = pd.concat([dummy_gender.ix[:, 'Gender_M':],dummy_resident.ix[:, 'Resident_H':],dataset], axis=1)
dataset.head()
# Make clean dataframe for regression model
array = dataset.values
n_features = len(array[0])
X = array[:,0:n_features-1] # features
y = array[:,n_features-1] # target
# Split-out validation dataset
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = train_test_split(X, y,
test_size=validation_size, random_state=seed)
# Spot-Check Algorithms
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))
# evaluate each model in turn
results = []
names = []
for name, model in models:
kfold = KFold(n_splits=10, random_state=seed)
cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy')
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# Compare Algorithms
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
lda = LinearDiscriminantAnalysis()
lda.fit(X_train, Y_train)
predictions = lda.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
# predict probability of pregnancy (class 1)
y_pred_prob = lda.predict_proba(X_validation)[:, 1]
# plot ROC curve
fpr, tpr, thresholds = metrics.roc_curve(Y_validation, y_pred_prob)
plt.plot(fpr, tpr)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([-0.05, 1.0])
plt.ylim([0.0, 1.05])
plt.gca().set_aspect('equal', adjustable='box')
plt.xlabel('False Positive Rate (1 - Specificity)')
plt.ylabel('True Positive Rate (Sensitivity)')
plt.show()
# calculate AUC
print(metrics.roc_auc_score(Y_validation, y_pred_prob))
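# Optional sketch (not in the original cells): one simple way to pick an operating
# threshold on the ROC curve is Youden's J statistic (TPR - FPR). Assumes `fpr`,
# `tpr` and `thresholds` from the ROC computation above.
best_idx = np.argmax(tpr - fpr)
print("Best threshold by Youden's J: %.3f (TPR=%.3f, FPR=%.3f)"
      % (thresholds[best_idx], tpr[best_idx], fpr[best_idx]))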
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question 3
Step2: PRINCIPLE
Step3: PRINCIPLE
Step4: PRINCIPLE
Step5: Question 4
Step6: Now we can fit an ARIMA model on this (Explaining ARIMA is out of scope of this workshop)
|
<ASSISTANT_TASK:>
Python Code:
# Import the library we need, which is Pandas and Matplotlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Set some parameters to get good visuals - style to ggplot and size to 15,10
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (15, 10)
# Read the csv file of month-wise quantity and price data we have.
df = pd.read_csv('MonthWiseMarketArrivals_clean.csv')
# Changing the date column to a Time Interval columnn
df.date = pd.DatetimeIndex(df.date)
# Change the index to the date column
df.index = pd.PeriodIndex(df.date, freq='M')
# Sort the data frame by date
df = df.sort_values(by = "date")
df.head()
dfBang = df[df.city == 'BANGALORE']
dfBang.head()
dfBang.plot(kind = "scatter", x = "quantity", y = "priceMod", s = 100)
dfBang.plot(kind = "scatter", x = "quantity", y = "priceMod", s = 100, alpha = 0.7, xlim = [0,2000000])
dfBang.corr()
pd.set_option('precision', 2)
dfBang.corr()
from pandas.tools.plotting import scatter_matrix
scatter_matrix(dfBang, figsize=(15, 15), diagonal='kde', s = 50)
import statsmodels.api as sm
x = dfBang.quantity
y = dfBang.priceMod
lm = sm.OLS(y, x).fit()
lm.summary()
# Import seaborn library for more functionality
import seaborn as sns
# We can try to fit a regression line to the data to see if there is a relationship
sns.regplot(x="quantity", y="priceMod", data=dfBang);
sns.jointplot(x="quantity", y="priceMod", data=dfBang, kind="reg");
# Set some parameters to get good visuals - style to ggplot and size to 15,10
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (15, 10)
dfBang.index = pd.DatetimeIndex(dfBang.date)
dfBang.head()
# Let us create a time series variable for priceMin
ts = dfBang.priceMin
ts.plot()
# We take the log transform to reduce the impact of high values
ts_log = np.log(ts)
ts_log.plot()
# One approach to remove the trend and seasonality impact is to take the difference between each observation
ts_log_diff = ts_log - ts_log.shift()
ts_log_diff.plot()
ts_log.plot()
# For smoothing the values we can use
# 12 month Moving Averages
ts_log_diff_ma = pd.rolling_mean(ts_log_diff, window = 12)
# Simple Exponential Smoothing
ts_log_diff_exp = pd.ewma(ts_log_diff, halflife=24)
ts_log_diff_ma.plot()
ts_log_diff_exp.plot()
ts_log_diff.plot()
from statsmodels.tsa.arima_model import ARIMA
model = ARIMA(ts_log, order=(0, 1, 2))
results_MA = model.fit(disp=-1)
plt.plot(ts_log_diff)
plt.plot(results_MA.fittedvalues, color='blue')
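# Optional sketch (not in the original cells): to read the ARIMA fit back on the
# original price scale, undo the differencing (cumulative sum anchored at the first
# log value) and then the log transform. This is an approximate reconstruction;
# assumes `results_MA`, `ts_log` and `ts` from above.
pred_log = ts_log.iloc[0] + results_MA.fittedvalues.cumsum()
pred = np.exp(pred_log)
plt.plot(ts)
plt.plot(pred, color='blue')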
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: But how do we sort it from highest to lowest? <br>
Step2: And what if what I want to sort is a list of records? <br>
Step3: Searching in lists
Step4: It is also very easy to find out whether an element is not in the list
Step5: On the other hand, if what we want to know is where the number 3 is located in the list, it is
Step6: Now, in all these cases what I did was search for a complete element; that is, I had to know everything I was looking for and not just a part of it, such as a student's ID number (padrón).
Step7: Anonymous functions
Step8: Although these are not functions you use every day, they are often used when a function receives another function as a parameter (functions are a data type, so they can be assigned to variables and can therefore also be passed as parameters).
Step9: Exceptions
Step10: But there is no need to be afraid of exceptions; you just have to keep them in mind and handle them in case they occur
Step11: But suppose we implement the rule of three as follows
Step12: However, if we pass 0 in place of x
Step13: Here we can see the whole traceback or stack trace, which shows how the different functions called one another until we reached the error. <br>
Step14: But in this case it still prints 0, so if we want, we can put the try/except even higher up in the stack trace
Step15: Every case is different and there is no single ideal place to catch the exception; it is up to the developer to decide where it is best to put it for each problem.
Step16: In those cases we can catch more than one exception as follows
Step17: Moreover, if we want both errors to show the same message, we can catch both exceptions together
Step18: Exception hierarchy
Step19: And also as
Step20: Although you can always write Exception instead of the specific exception type you expect, it is not good programming practice, since it can hide unwanted errors. For example, a syntax error.
Step21: But then, why not put that code inside the try-except? Because perhaps we do not want the except clauses to catch whatever runs in that block of code
Step22: Raising exceptions
|
<ASSISTANT_TASK:>
Python Code:
lista_de_numeros = [1, 6, 3, 9, 5, 2]
lista_ordenada = sorted(lista_de_numeros)
print lista_ordenada
print lista_de_numeros
lista_de_numeros = [1, 6, 3, 9, 5, 2]
print sorted(lista_de_numeros, reverse=True)
def crear_curso():
curso = [
{'nombre': 'Rodriguez, Carlos', 'nota': 6, 'padron': 98128},
{'nombre': 'Perez, Lucas', 'nota': 6, 'padron': 93453},
{'nombre': 'Gonzalez, Ramiro', 'nota': 8, 'padron': 93716},
{'nombre': 'Gonzalez, Carlos', 'nota': 6, 'padron': 90464},
{'nombre': 'Lopez, Carlos', 'nota': 7, 'padron': 98569}
]
return curso
def imprimir_curso(lista):
for idx, x in enumerate(lista):
msg = ' {pos:2}. {padron} - {nombre}: {nota}'
print msg.format(pos=idx, **x)
def obtener_padron(alumno):
return alumno['padron']
curso = crear_curso()
print 'La lista tiene los alumnos:'
imprimir_curso(curso)
lista_ordenada = sorted(curso, key=obtener_padron)
print 'Y la lista ordenada por padrón:'
imprimir_curso(lista_ordenada)
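# Optional sketch (not in the original tutorial): instead of writing obtener_padron
# by hand, operator.itemgetter builds an equivalent key function.
from operator import itemgetter
lista_ordenada2 = sorted(curso, key=itemgetter('padron'))
imprimir_curso(lista_ordenada2)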
lista = [11, 4, 6, 1, 3, 5, 7]
if 3 in lista:
print '3 esta en la lista'
else:
print '3 no esta en la lista'
if 15 in lista:
print '15 esta en la lista'
else:
print '15 no esta en la lista'
lista = [11, 4, 6, 1, 3, 5, 7]
if 3 not in lista:
print '3 NO esta en la lista'
else:
print '3 SI esta en la lista'
lista = [11, 4, 6, 1, 3, 5, 7]
pos = lista.index(3)
print 'El 3 se encuentra en la posición', pos
pos = lista.index(15)
print 'El 15 se encuentra en la posición', pos
curso = crear_curso()
print 'La lista tiene los alumnos:'
imprimir_curso(curso)
alumno_93716 = (alumno for alumno in curso if alumno['padron'] == 93716).next()
print 'El alumno de padron 93716 se llama {nombre}'.format(**alumno_93716)
help("lambda")
mi_funcion = lambda x, y: x+y
resultado = mi_funcion(1, 2)
print resultado
print type(mi_funcion)
def mi_funcion2(x, y):
return x + y
resultado = mi_funcion2(1, 2)
print resultado
print type(mi_funcion2)
curso = crear_curso()
print 'Curso original'
imprimir_curso(curso)
lista_ordenada = sorted(curso, key=lambda alumno: (-alumno['nota'], alumno['padron']))
print 'Curso ordenado'
imprimir_curso(lista_ordenada)
print 1/0
dividendo = 10
divisor = '0'
print 'Intentare hacer la división de {}/{}'.format(dividendo, divisor)
try:
resultado = dividendo / divisor
print resultado
except ZeroDivisionError:
print 'No se puede hacer la división ya que el divisor es 0.'
except TypeError:
print 'Alguno de los parametros no es un número'
print 'Algo'
def dividir(x, y):
return x/y
def regla_de_tres(x, y, z):
return dividir(z*y, x)
# If 15 out of 28 students passed, the percentage that passed is...
porcentaje_de_aprobados = regla_de_tres(28, 15, 100)
print 'Porcentaje de aprobados: {0:.2f}%'.format(porcentaje_de_aprobados)
resultado = regla_de_tres(0, 13, 100)
print 'Porcentaje de aprobados: {0:.2f}%'.format(resultado)
def dividir(x, y):
return x/y
def regla_de_tres(x, y, z):
resultado = 0
try:
resultado = dividir(z*y, x)
except ZeroDivisionError:
print 'No se puede calcular la regla de tres ' \
'porque el divisor es 0'
return resultado
print regla_de_tres(0, 1, 2)
def dividir(x, y):
return x/y
def regla_de_tres(x, y, z):
return dividir(z*y, x)
try:
print regla_de_tres(0, 1, 2)
except ZeroDivisionError:
print 'No se puede calcular la regla de tres ' \
'porque el divisor es 0'
def dividir_numeros(x, y):
try:
resultado = x/y
print 'El resultado es: %s' % resultado
except ZeroDivisionError:
print 'ERROR: Ha ocurrido un error por dividir por 0'
dividir_numeros(1, 0)
dividir_numeros(10, 2)
dividir_numeros("10", 2)
def dividir_numeros(x, y):
try:
resultado = x/y
print 'El resultado es: %s' % resultado
except TypeError:
print 'ERROR: Ha ocurrido un error por mezclar tipos de datos'
except ZeroDivisionError:
print 'ERROR: Ha ocurrido un error de división por cero'
except Exception:
print 'ERROR: Ha ocurrido un error inesperado'
dividir_numeros(1, 0)
dividir_numeros(10, 2)
dividir_numeros("10", 2)
def dividir_numeros(x, y):
try:
resultado = x/y
print 'El resultado es: %s' % resultado
except (ZeroDivisionError, TypeError):
print 'ERROR: No se puede calcular la división'
dividir_numeros(1, 0)
dividir_numeros(10, 2)
dividir_numeros("10", 2)
try:
print 1/0
except ZeroDivisionError:
print 'Ha ocurrido un error de división por cero'
try:
print 1/0
except Exception:
print 'Ha ocurrido un error inesperado'
def dividir_numeros(x, y):
try:
resultado = x/y
print 'El resultado es {}'.format(resultado)
except ZeroDivisionError:
print 'Error: División por cero'
else:
print 'Este mensaje se mostrará sólo si no ocurre ningún error'
finally:
print 'Este bloque de código se muestra siempre'
dividir_numeros(1, 0)
print '-------------'
dividir_numeros(10, 2)
def dividir_numeros(x, y):
try:
resultado = x/y
print 'El resultado es {}'.format(resultado)
except ZeroDivisionError:
print 'Error: División por cero'
else:
print 'Ahora hago que ocurra una excepción'
print 1/0
finally:
print 'Este bloque de código se muestra siempre'
dividir_numeros(1, 0)
print '-------------'
dividir_numeros(10, 2)
def dividir_numeros(x, y):
if y == 0:
raise Exception('Error de división por cero')
resultado = x/y
print 'El resultado es {0}'.format(resultado)
try:
dividir_numeros(1, 0)
except ZeroDivisionError as e:
print 'ERROR: División por cero'
except Exception as e:
print 'ERROR: ha ocurrido un error del tipo Exception'
print '----------'
dividir_numeros(1, 0)
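# Optional sketch (not in the original tutorial): rather than raising a generic
# Exception, it is usually better to define and raise a specific exception class,
# so callers can catch exactly this error.
class DivisionPorCeroError(Exception):
    pass

def dividir_numeros2(x, y):
    if y == 0:
        raise DivisionPorCeroError('Error de division por cero')
    print 'El resultado es {0}'.format(x / y)

try:
    dividir_numeros2(1, 0)
except DivisionPorCeroError as e:
    print 'ERROR:', e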
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Preprocessing
Step2: 3. Split the training data into a training set and a validation set
Step3: Part 2: Build the computation graph
Step4: 2. Declare the parameter variables
Step5: 3. Construct the forward-propagation part of the graph
Step6: 4. Declare the cost function
Step7: NOTE
Step8: 6. (optional) Compute the accuracy
Step9: Part 3: Build the training loop & run the training
Step10: Part 4: Save and load the model parameters
Step11: TensorBoard
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import pandas as pd
import tensorflow as tf
# read data from file
data = pd.read_csv('data/train.csv')
print(data.info())
# fill nan values with 0
data = data.fillna(0)
# convert ['male', 'female'] values of Sex to [1, 0]
data['Sex'] = data['Sex'].apply(lambda s: 1 if s == 'male' else 0)
# 'Survived' is the label of one class,
# add 'Deceased' as the other class
data['Deceased'] = data['Survived'].apply(lambda s: 1 - s)
# select features and labels for training
dataset_X = data[['Sex', 'Age', 'Pclass', 'SibSp', 'Parch', 'Fare']]
dataset_Y = data[['Deceased', 'Survived']]
print(dataset_X)
print(dataset_Y)
from sklearn.model_selection import train_test_split
# split training data and validation set data
X_train, X_val, y_train, y_val = train_test_split(dataset_X.as_matrix(), dataset_Y.as_matrix(),
test_size=0.2,
random_state=42)
# declare placeholders for the input data
# the first element of `shape` is None, so any number of records can be fed in at once
X = tf.placeholder(tf.float32, shape=[None, 6], name='input')
y = tf.placeholder(tf.float32, shape=[None, 2], name='label')
# declare the trainable variables
weights = tf.Variable(tf.random_normal([6, 2]), name='weights')
bias = tf.Variable(tf.zeros([2]), name='bias')
y_pred = tf.nn.softmax(tf.matmul(X, weights) + bias)
# use cross-entropy as the cost function
cross_entropy = - tf.reduce_sum(y * tf.log(y_pred + 1e-10),
reduction_indices=1)
# the cost of a mini-batch is the mean of the per-sample cross-entropies
cost = tf.reduce_mean(cross_entropy)
# minimize the cost with a (stochastic) gradient descent optimizer;
# TensorFlow builds the backward-pass part of the graph automatically
train_op = tf.train.GradientDescentOptimizer(0.001).minimize(cost)
# compute the accuracy
correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(y_pred, 1))
acc_op = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
with tf.Session() as sess:
# variables have to be initialized at the first place
tf.global_variables_initializer().run()
# training loop
for epoch in range(10):
total_loss = 0.
for i in range(len(X_train)):
# prepare feed data and run
feed_dict = {X: [X_train[i]], y: [y_train[i]]}
_, loss = sess.run([train_op, cost], feed_dict=feed_dict)
total_loss += loss
# display loss per epoch
print('Epoch: %04d, total loss=%.9f' % (epoch + 1, total_loss))
    print('Training complete!')
# Accuracy calculated by TensorFlow
accuracy = sess.run(acc_op, feed_dict={X: X_val, y: y_val})
print("Accuracy on validation set: %.9f" % accuracy)
# Accuracy calculated by NumPy
pred = sess.run(y_pred, feed_dict={X: X_val})
correct = np.equal(np.argmax(pred, 1), np.argmax(y_val, 1))
numpy_accuracy = np.mean(correct.astype(np.float32))
print("Accuracy on validation set (numpy): %.9f" % numpy_accuracy)
# counter for the number of training steps
global_step = tf.Variable(0, name='global_step', trainable=False)
# entry point for saving checkpoints
saver = tf.train.Saver()
# variables defined after the Saver is created will NOT be stored
# non_storable_variable = tf.Variable(777)
ckpt_dir = './ckpt_dir'
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
with tf.Session() as sess:
tf.global_variables_initializer().run()
    # restore the model from a checkpoint if one exists
ckpt = tf.train.get_checkpoint_state(ckpt_dir)
if ckpt and ckpt.model_checkpoint_path:
print('Restoring from checkpoint: %s' % ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
start = global_step.eval()
for epoch in range(start, start + 10):
total_loss = 0.
for i in range(0, len(X_train)):
feed_dict = {
X: [X_train[i]],
y: [y_train[i]]
}
_, loss = sess.run([train_op, cost], feed_dict=feed_dict)
total_loss += loss
print('Epoch: %04d, loss=%.9f' % (epoch + 1, total_loss))
# save a model checkpoint
global_step.assign(epoch).eval()
saver.save(sess, ckpt_dir + '/logistic.ckpt',
global_step=global_step)
print('Training complete!')
################################
# Constructing Dataflow Graph
################################
# arguments that can be set in command line
tf.app.flags.DEFINE_integer('epochs', 10, 'Training epochs')
tf.app.flags.DEFINE_integer('batch_size', 10, 'size of mini-batch')
FLAGS = tf.app.flags.FLAGS
with tf.name_scope('input'):
# create symbolic variables
X = tf.placeholder(tf.float32, shape=[None, 6])
y_true = tf.placeholder(tf.float32, shape=[None, 2])
with tf.name_scope('classifier'):
# weights and bias are the variables to be trained
weights = tf.Variable(tf.random_normal([6, 2]))
bias = tf.Variable(tf.zeros([2]))
y_pred = tf.nn.softmax(tf.matmul(X, weights) + bias)
# add histogram summaries for weights, view on tensorboard
tf.summary.histogram('weights', weights)
tf.summary.histogram('bias', bias)
# Minimise cost using cross entropy
# NOTE: add a epsilon(1e-10) when calculate log(y_pred),
# otherwise the result will be -inf
with tf.name_scope('cost'):
cross_entropy = - tf.reduce_sum(y_true * tf.log(y_pred + 1e-10),
reduction_indices=1)
cost = tf.reduce_mean(cross_entropy)
tf.summary.scalar('loss', cost)
# use gradient descent optimizer to minimize cost
train_op = tf.train.GradientDescentOptimizer(0.001).minimize(cost)
with tf.name_scope('accuracy'):
correct_pred = tf.equal(tf.argmax(y_true, 1), tf.argmax(y_pred, 1))
acc_op = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Add scalar summary for accuracy
tf.summary.scalar('accuracy', acc_op)
global_step = tf.Variable(0, name='global_step', trainable=False)
# use saver to save and restore model
saver = tf.train.Saver()
# this variable won't be stored, since it is declared after tf.train.Saver()
non_storable_variable = tf.Variable(777)
ckpt_dir = './ckpt_dir'
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
################################
# Training the model
################################
# use session to run the calculation
with tf.Session() as sess:
# create a log writer. run 'tensorboard --logdir=./logs'
writer = tf.summary.FileWriter('./logs', sess.graph)
merged = tf.summary.merge_all()
# variables have to be initialized at the first place
tf.global_variables_initializer().run()
# restore variables from checkpoint if exists
ckpt = tf.train.get_checkpoint_state(ckpt_dir)
if ckpt and ckpt.model_checkpoint_path:
print('Restoring from checkpoint: %s' % ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
start = global_step.eval()
# training loop
for epoch in range(start, start + FLAGS.epochs):
total_loss = 0.
for i in range(0, len(X_train), FLAGS.batch_size):
# train with mini-batch
feed_dict = {
X: X_train[i: i + FLAGS.batch_size],
y_true: y_train[i: i + FLAGS.batch_size]
}
_, loss = sess.run([train_op, cost], feed_dict=feed_dict)
total_loss += loss
# display loss per epoch
print('Epoch: %04d, loss=%.9f' % (epoch + 1, total_loss))
summary, accuracy = sess.run([merged, acc_op],
feed_dict={X: X_val, y_true: y_val})
writer.add_summary(summary, epoch) # Write summary
print('Accuracy on validation set: %.9f' % accuracy)
# set and update(eval) global_step with epoch
global_step.assign(epoch).eval()
saver.save(sess, ckpt_dir + '/logistic.ckpt',
global_step=global_step)
print('Training complete!')
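# Hypothetical follow-up (not part of the original notebook): preprocess the
# Kaggle test set with the same feature engineering so the trained classifier
# can score it; assumes a 'data/test.csv' file with the same columns exists.
test_data = pd.read_csv('data/test.csv')
test_data = test_data.fillna(0)
test_data['Sex'] = test_data['Sex'].apply(lambda s: 1 if s == 'male' else 0)
X_submit = test_data[['Sex', 'Age', 'Pclass', 'SibSp', 'Parch', 'Fare']]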
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Why is this formalism a useful probabilistic problem-solving tool?
Step2: And now, for some data
Step3: A pivot table might give another useful summary
Step4: Learn Parameters of Graph Model given Data
Step5: Some choices
Step6: Now the nodes have conditional probability information stored in them. For example,
Step7: Now let's look at a downstream node.
Step8: Causal Reasoning
Step9: Learn Graph Structure
Step10: different model, so learn new parameters
Step11: Queries with New Model
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import networkx as nx
G=nx.DiGraph()
G.add_edge('sex','height',weight=0.6)
nx.draw_networkx(G, node_color='y',node_size=2000, width=3)
plt.axis('off')
plt.show()
import numpy as np
import pandas as pd
import csv
import json
from libpgm.graphskeleton import GraphSkeleton
from libpgm.nodedata import NodeData
from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork
from libpgm.tablecpdfactorization import TableCPDFactorization
from libpgm.pgmlearner import PGMLearner
titanic = pd.DataFrame.from_csv("./data/titanic3.csv", index_col = None)
titanic.head()
titanic.describe()
ptable = pd.pivot_table(titanic, values=["name"], columns=["survived", "pclass","sex"], aggfunc=lambda x: len(x.unique()), margins=True)
print ptable
# housekeeping
# libpgm needs data as node:value list for each row
with open("./data/titanic3.csv") as f:
rdr = csv.reader(f, )
headers = next(rdr, None)
data = [{k:float(v) for k,v in zip(headers, row) if k !="name"} for row in rdr]
headers.remove("name") # not going to model survival based on name
#print data
pgn = {
"V": headers,
"E": [["age", "pclass"],
["sex", "survived"],
["pclass", "survived"]],
"Vdata": None }
# print pgn
G=nx.DiGraph()
for f,t in pgn["E"]:
G.add_edge(f,t)
nx.draw_networkx(G, node_color='y',node_size=2000, width=3)
plt.axis('off')
plt.show()
skel = GraphSkeleton()
skel.V = pgn["V"]
skel.E = pgn["E"]
skel.toporder()
learner = PGMLearner()
result = learner.discrete_mle_estimateparams(skel, data)
pd.DataFrame(result.Vdata["sex"]["cprob"]).transpose()
pd.DataFrame(result.Vdata["age"]["cprob"]).transpose()
pd.DataFrame(result.Vdata["pclass"]["cprob"]).transpose()
# use our solutions from above
nd = NodeData()
nd.Vdata = result.Vdata
nd.alldata = None
bn = DiscreteBayesianNetwork(skel, nd)
# query alters tables
tcpd = TableCPDFactorization(bn)
print "What is p(male=0)? {:.3%}".format(
tcpd.specificquery(dict(sex=[1]), dict())
)
tcpd = TableCPDFactorization(bn)
print "What is p(female=1)? {:.3%}".format(
tcpd.specificquery(dict(sex=[0]), dict())
)
# query alters tables
tcpd = TableCPDFactorization(bn)
print "What is p(female=1,survived=1)? {:.3%}".format(
tcpd.specificquery(dict(sex=[1]), dict(survived=1))
)
# query alters tables
tcpd = TableCPDFactorization(bn)
print "What is p(male=0,survived=0)? {:.3%}".format(
tcpd.specificquery(dict(sex=[0]), dict(survived=0))
)
# query alters tables
tcpd = TableCPDFactorization(bn)
print "What is p(male=0,class=3,survived=0)? {:.3%}".format(
tcpd.specificquery(dict(sex=[0],pclass=[3.0]), dict(survived=0))
)
# maybe useful for comparison
pd.pivot_table(titanic, values=["name"], columns=["sex", "pclass","survived"], aggfunc=lambda x: len(x.unique()))
# instantiate my learner
learner = PGMLearner()
# estimate structure
result = learner.lg_constraint_estimatestruct(data, indegree=1)
# output
print json.dumps(result.E, indent=2)
print json.dumps(result.V, indent=2)
G=nx.DiGraph()
for f,t in result.E:
G.add_edge(f,t,weight=0.6)
nx.draw_networkx(G, node_color='y',node_size=2000, width=3)
plt.axis('off')
plt.show()
skel = GraphSkeleton()
skel.V = result.V
skel.E = result.E
skel.toporder()
learner = PGMLearner()
result = learner.discrete_mle_estimateparams(skel, data)
nd = NodeData()
nd.Vdata = result.Vdata
nd.alldata = None
bn = DiscreteBayesianNetwork(skel, nd)
# query alters tables
tcpd = TableCPDFactorization(bn)
print "What is p(male=0)? {:.3%}".format(
tcpd.specificquery(dict(sex=[1]), dict())
)
tcpd = TableCPDFactorization(bn)
print "What is p(female=1)? {:.3%}".format(
tcpd.specificquery(dict(sex=[0]), dict())
)
# query alters tables
tcpd = TableCPDFactorization(bn)
print "What is p(female=1,survived=1)? {:.3%}".format(
tcpd.specificquery(dict(sex=[1]), dict(survived=1))
)
# query alters tables
tcpd = TableCPDFactorization(bn)
print "What is p(male=0,survived=0)? {:.3%}".format(
tcpd.specificquery(dict(sex=[0]), dict(survived=0))
)
# query alters tables
tcpd = TableCPDFactorization(bn)
print "What is p(male=0,class=3,survived=0)? {:.3%}".format(
tcpd.specificquery(dict(sex=[0],pclass=[3.0]), dict(survived=0))
)
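# A small convenience wrapper (an addition, not from the original notebook):
# each query mutates the factorization tables, so rebuild it every time.
def survival_query(bn, query, evidence):
    tcpd = TableCPDFactorization(bn)
    return tcpd.specificquery(query, evidence)
print "Check: p(male=0,class=3,survived=0) = {:.3%}".format(
    survival_query(bn, dict(sex=[0], pclass=[3.0]), dict(survived=0)))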
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use new clade groups
|
<ASSISTANT_TASK:>
Python Code:
import itertools
import editdistance
import pandas as pd
import seaborn as sns
# read_annot is assumed to be a DataFrame of read annotations loaded upstream
editdist_sp = [ (sp1,sp2,editdistance.eval(sp1,sp2)) for sp1,sp2 in itertools.combinations(read_annot["species_fillna"].unique(),2) ]
editdist_df = pd.DataFrame.from_records(editdist_sp,columns=["sp1","sp2","edit_distance"])
editdist_df["similarity"] = editdist_df.apply(lambda r: (max(len(r["sp1"]),len(r["sp2"]))-r["edit_distance"])/ max(len(r["sp1"]),len(r["sp2"])),axis=1)
editdist_df.head()
cluster_membership = {}
clusters = []
#Single linkage clustering
for _,row in editdist_df[editdist_df.similarity > 0.65 ].sort_values("similarity",ascending=False).iterrows():
if row["sp1"] not in cluster_membership and row["sp2"] not in cluster_membership:
#Create new cluster and add them both
clusters.append([row["sp1"],row["sp2"]])
cluster_membership[row["sp1"]] = len(clusters)-1
cluster_membership[row["sp2"]] = len(clusters)-1
elif row["sp1"] in cluster_membership:
if row["sp2"] not in cluster_membership:
#Add sp2 to sp1 cluster
clusters[cluster_membership[row["sp1"]]].append(row["sp2"])
cluster_membership[row["sp2"]] = cluster_membership[row["sp1"]]
else:
if cluster_membership[row["sp1"]] != cluster_membership[row["sp2"]]:
#Combine clusters!
c1_pos = cluster_membership[row["sp1"]]
c2_pos = cluster_membership[row["sp2"]]
clusters[c1_pos] += clusters[c2_pos]
for c in clusters[c2_pos]:
cluster_membership[c] = c1_pos
clusters[c2_pos] = None
else:
#sp2 is already in a cluster and sp1 is not
clusters[cluster_membership[row["sp2"]]].append(row["sp1"])
cluster_membership[row["sp1"]] = cluster_membership[row["sp2"]]
cluster_names = [";".join(c) if c else None for c in clusters]
read_annot["sp_group"] = read_annot["species_fillna"].apply(lambda x: cluster_names[cluster_membership[x]] if x in cluster_membership and "unclassified" not in x else x)
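# Sanity check (an addition): every clustered species should map to a live
# (non-merged) cluster and appear in that cluster's member list.
for sp, idx in cluster_membership.items():
    assert clusters[idx] is not None and sp in clusters[idx]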
df_counts = read_annot.groupby("sp_group").size().reset_index()
df_counts.columns = ["species","read_count"]
print(df_counts[df_counts.read_count >= 2].shape)
df_counts[df_counts.read_count >= 2].sort_values("read_count",ascending=False)
virus_count_histogram = df_counts[df_counts.read_count >= 3].groupby("read_count").size().reset_index()
sns.barplot(x=virus_count_histogram["read_count"],y=virus_count_histogram[0])
df_counts[df_counts.read_count >= 2].sort_values("read_count",ascending=False).to_csv("454_seqs_kaiju_greedy.filt_species.tsv",sep="\t",index=False)
!ls
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: LLQ
|
<ASSISTANT_TASK:>
Python Code:
# NOTE: clinical, labs, pts, duration and the demographic series (age, bmi, hiv,
# stage, duration_m) plus helpers ti/prettify_ax/FIGDIR are defined upstream of
# this excerpt in the authors' analysis environment
regimen = clinical['Regimen Type'].ix[pts].dropna()
print regimen.value_counts()
regimen = regimen[regimen.map(regimen.value_counts()) > 10]
regimen = regimen.ix[pts].fillna('Other')
regimen = regimen.str.replace(' Based','')
regimen = regimen.ix[ti(duration != 'Control')]
regimen.value_counts()
adherence = clinical['adherence %'].ix[pts].dropna()
paofi = clinical['paofi total'].ix[pts].dropna()
beck = clinical['beck total'].ix[pts].dropna()
iadl = clinical.IADL.ix[pts].dropna()
age.name = 'Age (years)'
bmi.name = 'BMI'
control_color = (0.2980392156862745, 0.4470588235294118, 0.6901960784313725)
hiv_color = (0.3333333333333333, 0.6588235294117647, 0.40784313725490196)
sns.set_style(rc={'font.family': 'sans-serif', 'font.sans-serif': ['Arial'],
'font.size': 14})
fig, axs = subplots(4,2, figsize=(8, 10))
sns.violinplot(age, hiv, inner='None', ax=axs[0][0],
order=['HIV-','HIV+'])
axs[0][0].set_yticks([0,20,40,60,80])
sns.violinplot(bmi, hiv, inner='None', ax=axs[0][1],
order=['HIV-','HIV+'])
duration_m.hist(ax=axs[1][0], color=hiv_color)
axs[1][0].set_xlabel('Estimated HIV duration (months)')
sns.barplot(stage.dropna(), ax=axs[1][1], color=hiv_color)
o = [u'PI/NRTI', u'NNRTI/NRTI', u'3-class','Other']
sns.barplot(regimen, ax=axs[2][0], x_order=o, color=hiv_color)
adherence.dropna().hist(ax=axs[2][1], color=hiv_color, bins=15)
axs[2][1].set_xlabel('Drug adherence (%)')
beck.dropna().hist(ax=axs[3][0], color=hiv_color)
axs[3][0].set_xlabel('Beck depression inventory')
paofi.dropna().hist(ax=axs[3][1], color=hiv_color)
axs[3][1].set_xlabel('PAOFI')
letters = list(map(chr, range(97, 123)))[:8]
for i,ax in enumerate(axs.flatten()):
ax.text(-0.15, 1.15, letters[i].upper(), transform=ax.transAxes,
fontsize=20, fontweight='bold', va='top', ha='right')
if i > 1:
ax.set_ylabel('No. of patients')
prettify_ax(ax)
fig.tight_layout()
fig.savefig(FIGDIR + 'sup_clinical_params.pdf')
pd.crosstab(labs['LLQ PLASMA'], duration)
fisher_exact_test(duration=='HIV Long', labs['LLQ PLASMA']=='>LLQ')
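# The fisher_exact_test helper comes from the authors' environment; a minimal
# equivalent sketch with scipy (an assumption about what it computes):
from scipy.stats import fisher_exact
ct = pd.crosstab(duration == 'HIV Long', labs['LLQ PLASMA'] == '>LLQ')
odds_ratio, p_value = fisher_exact(ct)
print odds_ratio, p_value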
# c = ['LLQ PLASMA', 'PT', 'INR']  # coagulation markers, superseded by the blood-count panel below
c = ['WBC','RBC','HGB','HCT','MCV','MCH','MCHC','Platelets']
fig, axs = subplots(4,2, figsize=(8,10))
axs = axs.flatten()
for i,b in enumerate(c):
labs[b].hist(ax=axs[i], color=hiv_color)
axs[i].set_xlabel(b)
axs[i].set_ylabel('No. of patients')
prettify_ax(axs[i])
letters = list(map(chr, range(97, 123)))[8:]
for i,ax in enumerate(axs.flatten()):
ax.text(-0.15, 1.15, letters[i].upper(), transform=ax.transAxes,
fontsize=20, fontweight='bold', va='top', ha='right')
prettify_ax(ax)
fig.tight_layout()
fig.savefig(FIGDIR + 'sup_clinical_blood.pdf')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Predictive Modeling
Step3: The Central Theses of Machine Learning
Step4: How to Fight Overfitting?
Step5: Exercise
Step6: L1 Regularized Linear Regression Loss Function -- "LASSO"
Step7: Exercise
Step8: L1 + L2 Regularized Linear Regression Loss Function -- "ElasticNet"
Step9: Exercise
Step10: Cross Validation
Step11: Exercise
Step12: K-Fold Cross Validation
Step13: Good
Step14: Exercise
Step15: Putting It All Together
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from IPython.display import Image
def mean_squared_error(y_true, y_pred):
    '''Calculate the mean squared error given a vector of true ys and a vector of predicted ys.'''
diff = y_true - y_pred
return np.dot(diff, diff) / len(diff)
def predict_test_values(model, X_train, y_train, X_test):
model.fit(X_train, y_train)
return model.predict(X_test)
def calc_train_and_test_error(model, X_train, y_train, X_test, y_test):
model.fit(X_train, y_train)
y_pred_train = model.predict(X_train)
y_pred_test = model.predict(X_test)
return mean_squared_error(y_train, y_pred_train), mean_squared_error(y_test, y_pred_test)
Image(url='http://radimrehurek.com/data_science_python/plot_bias_variance_examples_2.png')
# Ridge Regression in scikit-learn
from sklearn import linear_model
model_ridge = linear_model.Ridge(alpha = .5)
# once it's been fit, you can look at the learned beta values of the model with: model_ridge.coef_
# load overfitting data
with np.load('data/overfitting_data.npz') as data:
x_train = data['x_train']
y_train = data['y_train']
x_test = data['x_test']
y_test = data['y_test']
model_lr = linear_model.LinearRegression()
model_ridge = linear_model.Ridge(alpha=1)
print "Linear Regression Training and Test Errors:"
print calc_train_and_test_error(model_lr, x_train, y_train, x_test, y_test)
print
print "Ridge Regression Training and Test Errors:"
print calc_train_and_test_error(model_ridge, x_train, y_train, x_test, y_test)
print
print "Sum of Linear Regression Coefficients:"
print np.sum(np.abs(model_lr.coef_))
print
print "Sum of Ridge Regression Coefficients:"
print np.sum(np.abs(model_ridge.coef_))
print
?linear_model.Ridge
# LASSO in scikit-learn
from sklearn import linear_model
model_lasso = linear_model.Lasso(alpha = 0.5)
# Write your code here
model_lasso = linear_model.Lasso(alpha=1)
print "Ridge Regression Training and Test Errors:"
print calc_train_and_test_error(model_lasso, x_train, y_train, x_test, y_test)
print
print "Sum of Ridge Regression Coefficients:"
print np.sum(np.abs(model_lasso.coef_))
print
n_disp_coefs = 10
print 'Linear Regression Coefficients:'
print model_lr.coef_[:n_disp_coefs]
print
print 'Ridge Regression Coefficients:'
print model_ridge.coef_[:n_disp_coefs]
print
print 'LASSO Coefficients:'
print model_lasso.coef_[:n_disp_coefs]
print
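# Sparsity check (an addition): L1 drives many coefficients exactly to zero,
# while L2 only shrinks them toward zero.
for name, m in [('lr', model_lr), ('ridge', model_ridge), ('lasso', model_lasso)]:
    print name, 'nonzero coefs:', np.sum(m.coef_ != 0)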
from sklearn import linear_model
model_en = linear_model.ElasticNet(alpha=0.5, l1_ratio=0.1)
# note: scikit learn's current implementation of ElasticNet isn't stable with l1_ratio <= 0.01
# Write your code here
model_en = linear_model.ElasticNet(alpha=1, l1_ratio=0.5)
print 'ElasticNet Errors:'
print calc_train_and_test_error(model_en, x_train, y_train, x_test, y_test)
print
print 'Sum of ElasticNet Coefficients'
print np.sum(np.abs(model_en.coef_))
print
n_disp_coefs = 10
print 'ElasticNet Coefficients:'
print model_en.coef_[:n_disp_coefs]
print
# a helper function for performing validation set cross validation
from sklearn.cross_validation import train_test_split
validation_portion = 0.1
seed = 1234
x_train_small, x_valid, y_train_small, y_valid = \
train_test_split(x_train, y_train, test_size=validation_portion, random_state=seed)
print 'Original Training Set Size:'
print x_train.shape, y_train.shape
print
print 'Reducted Training Set Size:'
print x_train_small.shape, y_train_small.shape
print
print 'Validation Set Size:'
print x_valid.shape, y_valid.shape
print
def validation_set_error(model, x_train, y_train, validation_portion=0.1, seed=1234):
# FILL IN YOUR CODE HERE
x_train_small, x_valid, y_train_small, y_valid = \
train_test_split(x_train, y_train, test_size=validation_portion, random_state=seed)
model.fit(x_train_small, y_train_small)
y_pred_valid = model.predict(x_valid)
return mean_squared_error(y_valid, y_pred_valid)
# set up models
model_lr_valid = linear_model.LinearRegression()
model_ridge_valid = linear_model.Ridge(alpha=10)
# calculate errors
valid_portion = .1
n_seeds = 5
print "Linear Regression Training and Test Errors:"
# FILL IN YOUR CODE HERE
print calc_train_and_test_error(model_lr_valid, x_train_small, y_train_small, x_test, y_test)
print
print "Linear Regression Validation Errors:"
# FILL IN YOUR CODE HERE
print validation_set_error(model_lr_valid, x_train, y_train, validation_portion=0.1, seed=1234)
print
for seed in range(n_seeds):
print validation_set_error(model_lr_valid, x_train, y_train, validation_portion=valid_portion, seed=seed)
print
print "Ridge Regression Training and Test Errors:"
# FILL IN YOUR CODE HERE
print calc_train_and_test_error(model_ridge_valid, x_train_small, y_train_small, x_test, y_test)
print
print "Ridge Regression Validation Errors:"
# FILL IN YOUR CODE HERE
print validation_set_error(model_ridge_valid, x_train, y_train, validation_portion=0.1, seed=1234)
print
for seed in range(n_seeds):
print validation_set_error(model_ridge_valid, x_train, y_train, validation_portion=valid_portion, seed=seed)
print
Image(url='https://chrisjmccormick.files.wordpress.com/2013/07/10_fold_cv.png')
# scikit learn provides a useful object to help you perform kfold cross validation
from sklearn.cross_validation import KFold
n_data = len(y_train)
fold_count = 0
for train_reduced_row_ids, valid_row_ids in KFold(n_data, n_folds=4):
print
print
print "FOLD %d:" % fold_count
print "-------"
print("train_ids:\n%s\n\nvalid_ids\n%s" % (train_reduced_row_ids, valid_row_ids))
x_train_reduced = x_train[train_reduced_row_ids]
y_train_reduced = y_train[train_reduced_row_ids]
x_valid = x_train[valid_row_ids]
y_valid = y_train[valid_row_ids]
fold_count += 1
# NOTE: KFolds isn't random at all. It's important to shuffle your data first before using it.
from sklearn.utils import shuffle
x_train_shuffled, y_train_shuffled = shuffle(x_train, y_train)
def kfold_error(model, x_train, y_train, k=4, seed=1234):
# FILL IN YOUR CODE HERE
# shuffle training data
x_train_shuffled, y_train_shuffled = shuffle(x_train, y_train, random_state=seed)
n_data = len(y_train)
error_sum = 0
for train_reduced_row_ids, valid_row_ids in KFold(n_data, n_folds=k):
x_train_reduced = x_train_shuffled[train_reduced_row_ids]
y_train_reduced = y_train_shuffled[train_reduced_row_ids]
x_valid = x_train_shuffled[valid_row_ids]
y_valid = y_train_shuffled[valid_row_ids]
model.fit(x_train_reduced, y_train_reduced)
y_valid_pred = model.predict(x_valid)
error_sum += mean_squared_error(y_valid, y_valid_pred)
return error_sum*1.0 / k
# set up models
model_lr_valid = linear_model.LinearRegression()
model_ridge_valid = linear_model.Ridge(alpha=10)
# calculate errors
n_seeds = 3
k = 5
print "Linear Regression Training and Test Errors:"
# FILL IN YOUR CODE HERE
print calc_train_and_test_error(model_lr_valid, x_train, y_train, x_test, y_test)
print
print "Linear Regression K-Fold Errors:"
# FILL IN YOUR CODE HERE
print
for seed in range(n_seeds):
print kfold_error(model_lr_valid, x_train, y_train, k=k, seed=seed)
print
print
print "Ridge Regression Training and Test Errors:"
# FILL IN YOUR CODE HERE
print calc_train_and_test_error(model_ridge_valid, x_train, y_train, x_test, y_test)
print
print "Ridge Regression K-Fold Errors:"
# FILL IN YOUR CODE HERE
print
for seed in range(n_seeds):
print kfold_error(model_ridge_valid, x_train, y_train, k=k, seed=seed)
print
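# Equivalent check with scikit-learn's built-in helper (an addition; requires a
# sklearn version providing model_selection, and note the negated score convention):
from sklearn.model_selection import cross_val_score
scores = cross_val_score(linear_model.Ridge(alpha=10), x_train, y_train,
                         scoring='neg_mean_squared_error', cv=5)
print 'sklearn 5-fold MSE:', -scores.mean()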
[np.nan] + [1,2]
def model_name(model):
s = model.__str__().lower()
if "linearregression" in s:
return 'LinearRegression'
elif "lasso" in s:
return 'Lasso(a=%g)' % model.alpha
elif "ridge" in s:
return 'Ridge(a=%g)' % model.alpha
elif "elastic" in s:
return 'ElasticNet(a=%g, r=%g)' % (model.alpha, model.l1_ratio)
else:
raise ValueError("Unknown Model Type")
def create_models(alphas=(.01, .03, .1, .3, 1, 3), l1_ratios=(.7, .5, .3)):
models = [linear_model.LinearRegression()]
models.extend([linear_model.Ridge(a) for a in alphas])
models.extend([linear_model.Lasso(a) for a in alphas])
models.extend([linear_model.ElasticNet(a, l1_ratio=l) for a in alphas for l in l1_ratios])
return models
def results_df(models, betas_true, x_train, y_train, x_test, y_test, k=4):
n_data, n_dim = x_train.shape
n_zeros = n_dim - len(betas_true)
betas_true = np.concatenate([betas_true, np.zeros(n_zeros)])
# fit models to training data
[m.fit(x_train, y_train) for m in models]
betas = np.vstack([betas_true] + [m.coef_ for m in models])
beta_names = ['Beta ' + str(i) for i in range(n_dim)]
# set up model names
model_names = ["True Coefs"] + [model_name(m) for m in models]
df = pd.DataFrame(data=betas, columns=beta_names, index=model_names)
# calculate training errors
y_preds = [m.predict(x_train) for m in models]
errors = [np.nan] + [mean_squared_error(y_train, y_pred) for y_pred in y_preds]
df['Train Error'] = errors
# calculate validation errors
errors = [np.nan] + [kfold_error(m, x_train, y_train, k=k) for m in models]
df['Cross Validation Error'] = errors
# calculate test errors
y_preds = [m.predict(x_test) for m in models]
errors = [np.nan] + [mean_squared_error(y_test, y_pred) for y_pred in y_preds]
df['Test Error'] = errors
return df
# these are some of the magic parameters that I used to actually
# generate the overfitting dataset
n_dim = 598
n_dim_meaningful = 3
n_dim_disp_extra = 2
# the actual betas used to generate the y values. the rest were 0.
betas_true = np.arange(n_dim_meaningful) + 1
# create a whole bunch of untrained models
models = create_models(alphas=(.01, .03, .1, .3, 1), l1_ratios=(.9, .7, .5))
#
all_results = results_df(models, betas_true, x_train, y_train, x_test, y_test, k=4)
# decide which columns we want to display
disp_cols = ["Beta " + str(i) for i in range(n_dim_meaningful + n_dim_disp_extra)]
disp_cols += ['Train Error', 'Cross Validation Error', 'Test Error']
# display the results
all_results[disp_cols]
%matplotlib inline
import matplotlib.pyplot as plt
f = plt.figure()
plt.scatter(all_results['Cross Validation Error'], all_results['Test Error'])
plt.xlabel('Cross Validation Error')
plt.ylabel('Test Error')
f.set_size_inches(8, 8)
plt.show()
# scikit learn includes some functions for making cross validation easier
# and computationally faster for a some models
from sklearn import linear_model
model_ridge_cv = linear_model.RidgeCV(alphas=[0.1, 1.0, 10.0])
model_lasso_cv = linear_model.LassoCV(alphas=[0.1, 1.0, 10.0])
model_en_cv = linear_model.ElasticNetCV(l1_ratio=[.9], n_alphas=100)
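# Usage sketch (an addition): fitting the *CV variants selects the penalty
# strength automatically from the candidate grid.
model_ridge_cv.fit(x_train, y_train)
print 'RidgeCV selected alpha =', model_ridge_cv.alpha_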
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Range
|
<ASSISTANT_TASK:>
Python Code:
# Versão da Linguagem Python
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())
# Imprimindo números pares entre 50 e 101
for i in range(50, 101, 2):
print(i)
for i in range(3, 6):
print (i)
for i in range(0, -20, -2):
print(i)
lista = ['Morango', 'Banana', 'Abacaxi', 'Uva']
lista_tamanho = len(lista)
for i in range(0, lista_tamanho):
print(lista[i])
# Tudo em Python é um objeto
type(range(0,3))
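# Two extra properties of range (an addition): it is lazy, so materialize it
# with list() when a real list is needed, and membership tests are cheap.
list(range(0, 10, 3))
50 in range(50, 101, 2)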
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can now listen to the resulting audio with the beats marked by beeps. We can also visualize beat estimations.
Step2: BPM histogram
Step3: BPM estimation with PercivalBpmEstimator
Step4: BPM estimation for audio loops
Step5: BPM estimation with TempoCNN
Step6: We can plot a slice of the waveform on top of a grid with the estimated tempo to get visual verification
Step7: TempoCNN operates on audio slices of 12 seconds with an overlap of 6 seconds by default. Additionally, the algorithm outputs the local estimations along with their probabilities. The global value is computed by majority voting by default. However, this method is only recommended when a constant tempo can be assumed.
|
<ASSISTANT_TASK:>
Python Code:
import essentia.standard as es
from tempfile import TemporaryDirectory
# Loading an audio file.
audio = es.MonoLoader(filename='../../../test/audio/recorded/dubstep.flac')()
# Compute beat positions and BPM.
rhythm_extractor = es.RhythmExtractor2013(method="multifeature")
bpm, beats, beats_confidence, _, beats_intervals = rhythm_extractor(audio)
print("BPM:", bpm)
print("Beat positions (sec.):", beats)
print("Beat estimation confidence:", beats_confidence)
# Mark beat positions in the audio and write it to a file.
# Use beeps instead of white noise to mark them, as it is more distinctive.
marker = es.AudioOnsetsMarker(onsets=beats, type='beep')
marked_audio = marker(audio)
# Write to an audio file in a temporary directory.
temp_dir = TemporaryDirectory()
es.MonoWriter(filename=temp_dir.name + '/dubstep_beats.flac')(marked_audio)
import IPython
IPython.display.Audio(temp_dir.name + '/dubstep_beats.flac')
from pylab import plot, show, figure, imshow
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (15, 6)
plot(audio)
for beat in beats:
plt.axvline(x=beat*44100, color='red')
plt.xlabel('Time (samples)')
plt.title("Audio waveform and the estimated beat positions")
show()
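# Quick numeric sanity check (an addition): the median inter-beat interval
# should agree with the BPM reported above.
import numpy as np
print("Median BPM from beat intervals: %0.1f" % np.median(60. / np.diff(beats)))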
peak1_bpm, peak1_weight, peak1_spread, peak2_bpm, peak2_weight, peak2_spread, histogram = \
es.BpmHistogramDescriptors()(beats_intervals)
print("Overall BPM (estimated before): %0.1f" % bpm)
print("First histogram peak: %0.1f bpm" % peak1_bpm)
print("Second histogram peak: %0.1f bpm" % peak2_bpm)
fig, ax = plt.subplots()
ax.bar(range(len(histogram)), histogram, width=1)
ax.set_xlabel('BPM')
ax.set_ylabel('Frequency of occurrence')
plt.title("BPM histogram")
ax.set_xticks([20 * x + 0.5 for x in range(int(len(histogram) / 20))])
ax.set_xticklabels([str(20 * x) for x in range(int(len(histogram) / 20))])
plt.show()
# Loading an audio file.
audio = es.MonoLoader(filename='../../../test/audio/recorded/dubstep.flac')()
# Compute BPM.
bpm = es.PercivalBpmEstimator()(audio)
print("BPM:", bpm)
# Our input audio is indeed a well-cut loop. Let's compute the BPM.
bpm = es.LoopBpmEstimator()(audio)
print("Loop BPM:", bpm)
!curl -SLO https://essentia.upf.edu/models/tempo/tempocnn/deeptemp-k16-3.pb
import essentia.standard as es
sr = 11025
audio_11khz = es.MonoLoader(filename='../../../test/audio/recorded/techno_loop.wav', sampleRate=sr)()
global_bpm, local_bpm, local_probs = es.TempoCNN(graphFilename='deeptemp-k16-3.pb')(audio_11khz)
print('song BPM: {}'.format(global_bpm))
import numpy as np
duration = 5 # seconds
audio_slice = audio_11khz[:sr * duration]
plt.plot(audio_slice)
markers = np.arange(0, len(audio_slice), sr / (global_bpm / 60))
for marker in markers:
plt.axvline(x=marker, color='red')
plt.title("Audio waveform on top of a tempo grid")
show()
print('local BPM: {}'.format(local_bpm))
print('local probabilities: {}'.format(local_probs))
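# Flag low-confidence slices (an addition); majority voting is only safe when
# most local estimates agree with high probability.
low_conf = local_probs < 0.5
print('{} of {} slices below 0.5 confidence'.format(low_conf.sum(), len(local_probs)))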
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To compute the proportion of each type of family, I'll use Scipy to compute the binomial distribution.
Step2: And put the results into a Pandas Series.
Step3: But we also have the information that the frequencies of these family types are proportional to 30%, 40%, and 10%, so we can multiply through.
Step5: So that's the (unnormalized) prior.
Step6: This function takes a prior and a likelihood and returns a DataFrame
Step7: So that's what we should believe about the family after the first update.
Step8: Here's the second update.
Step9: Based on the sister named Mary, we can rule out families with no girls, and families with more than one girl are more likely.
Step10: A convenient way to compute the total probability of an outcome is to do an update as if it happened, ignore the posterior probabilities, and compute the sum of the products.
Step11: At this point, there are only three family types left standing, (1,2), (2,2), and (1,3).
Step12: The Bayes factor
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
qs = [(2, 0),
(1, 1),
(0, 2),
(3, 0),
(2, 1),
(1, 2),
(0, 3),
(4, 0),
(3, 1),
(2, 2),
(1, 3),
(0, 4),
]
index = pd.MultiIndex.from_tuples(qs, names=['Boys', 'Girls'])
from scipy.stats import binom
boys = index.to_frame()['Boys']
girls = index.to_frame()['Girls']
ps = binom.pmf(girls, boys+girls, 0.5)
prior1 = pd.Series(ps, index, name='Prior')
pd.DataFrame(prior1)
ps = [30, 30, 30, 40, 40, 40, 40, 10, 10, 10, 10, 10]
prior1 *= ps
pd.DataFrame(prior1)
import pandas as pd
def make_table(prior, likelihood):
Make a DataFrame representing a Bayesian update.
table = pd.DataFrame(prior)
table.columns = ['Prior']
table['Likelihood'] = likelihood
table['Product'] = (table['Prior'] * table['Likelihood'])
total = table['Product'].sum()
table['Posterior'] = table['Product'] / total
return table
likelihood1 = prior1.index.to_frame()['Boys']
table1 = make_table(prior1, likelihood1)
table1
from scipy.stats import binom
ns = prior1.index.to_frame()['Girls']
p = 1 / 100
k = 1
likelihood2 = binom.pmf(k, ns, p)
likelihood2
prior2 = table1['Posterior']
table2 = make_table(prior2, likelihood2)
table2
ns = prior1.index.to_frame()['Girls'] - 1
ns.name = 'Additional sisters'
neg = (ns < 0)
ns[neg] = 0
pd.DataFrame(ns)
p = 1 / 10
k = 1
likelihood3 = binom.sf(k-1, ns, p)
likelihood3
prior3 = table2['Posterior']
table3 = make_table(prior3, likelihood3)
table3
p = table3['Product'].sum()
p
1/p
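# Marginalizing the posterior (an addition): the probability the family has a
# given number of girls, summed over the possible numbers of boys.
table3['Posterior'].groupby(level='Girls').sum()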
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Model Test/Validation Data
Step2: Look at the Model Graph In Tensorboard
Step3: Train Model
Step4: Look at the Train and Test Loss Summary In Tensorboard
Step5: Look at the Model On Disk
Step6: HACK
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
from tensorflow.python.client import timeline
import pylab
import numpy as np
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
tf.logging.set_verbosity(tf.logging.INFO)
tf.reset_default_graph()
num_samples = 100000
from datetime import datetime
version = int(datetime.now().strftime("%s"))
print(version)
x_train = np.random.rand(num_samples).astype(np.float32)
print(x_train)
noise = np.random.normal(scale=0.01, size=len(x_train))
y_train = x_train * 0.1 + 0.3 + noise
print(y_train)
pylab.plot(x_train, y_train, '.')
x_test = np.random.rand(len(x_train)).astype(np.float32)
print(x_test)
noise = np.random.normal(scale=0.01, size=len(x_train))
y_test = x_test * 0.1 + 0.3 + noise
print(y_test)
pylab.plot(x_train, y_train, '.')
with tf.device("/cpu:0"):
W = tf.get_variable(shape=[], name='weights')
print(W)
b = tf.get_variable(shape=[], name='bias')
print(b)
x_observed = tf.placeholder(shape=[None], dtype=tf.float32, name='x_observed')
print(x_observed)
with tf.device("/cpu:0"):
y_pred = W * x_observed + b
print(y_pred)
with tf.device("/cpu:0"):
y_observed = tf.placeholder(shape=[None], dtype=tf.float32, name='y_observed')
print(y_observed)
loss_op = tf.reduce_mean(tf.square(y_pred - y_observed))
optimizer_op = tf.train.GradientDescentOptimizer(0.025)
train_op = optimizer_op.minimize(loss_op)
print("loss:", loss_op)
print("optimizer:", optimizer_op)
print("train:", train_op)
with tf.device("/cpu:0"):
init_op = tf.global_variables_initializer()
print(init_op)
train_summary_writer = tf.summary.FileWriter('/root/tensorboard/linear/cpu/%s/train' % version, graph=tf.get_default_graph())
test_summary_writer = tf.summary.FileWriter('/root/tensorboard/linear/cpu/%s/test' % version, graph=tf.get_default_graph())
config = tf.ConfigProto(
log_device_placement=True,
)
print(config)
sess = tf.Session(config=config)
sess.run(init_op)
print(sess.run(W))
print(sess.run(b))
def test(x, y):
return sess.run(loss_op, feed_dict={x_observed: x, y_observed: y})
test(x=x_test, y=y_test)
loss_summary_scalar_op = tf.summary.scalar('loss', loss_op)
loss_summary_merge_all_op = tf.summary.merge_all()
%%time
max_steps = 400
run_metadata = tf.RunMetadata()
for step in range(max_steps):
if (step < max_steps):
test_summary_log, _ = sess.run([loss_summary_merge_all_op, loss_op], feed_dict={x_observed: x_test, y_observed: y_test})
train_summary_log, _ = sess.run([loss_summary_merge_all_op, train_op], feed_dict={x_observed: x_train, y_observed: y_train})
else:
test_summary_log, _ = sess.run([loss_summary_merge_all_op, loss_op], feed_dict={x_observed: x_test, y_observed: y_test})
train_summary_log, _ = sess.run([loss_summary_merge_all_op, train_op], feed_dict={x_observed: x_train, y_observed: y_train}, options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE), run_metadata=run_metadata)
trace = timeline.Timeline(step_stats=run_metadata.step_stats)
with open('cpu-timeline.json', 'w') as trace_file:
trace_file.write(trace.generate_chrome_trace_format(show_memory=True))
if step % 1 == 0:
print(step, sess.run([W, b]))
train_summary_writer.add_summary(train_summary_log, step)
train_summary_writer.flush()
test_summary_writer.add_summary(test_summary_log, step)
test_summary_writer.flush()
pylab.plot(x_train, y_train, '.', label="target")
pylab.plot(x_train, sess.run(y_pred, feed_dict={x_observed: x_train, y_observed: y_train}), ".", label="predicted")
pylab.legend()
pylab.ylim(0, 1.0)
test(x=x_test, y=y_test)
from tensorflow.python.saved_model import utils
tensor_info_x_observed = utils.build_tensor_info(x_observed)
print(tensor_info_x_observed)
tensor_info_y_pred = utils.build_tensor_info(y_pred)
print(tensor_info_y_pred)
export_path = "/root/models/linear/cpu/%s" % version
print(export_path)
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
with tf.device("/cpu:0"):
builder = saved_model_builder.SavedModelBuilder(export_path)
prediction_signature = signature_def_utils.build_signature_def(
inputs = {'x_observed': tensor_info_x_observed},
outputs = {'y_pred': tensor_info_y_pred},
method_name = signature_constants.PREDICT_METHOD_NAME)
legacy_init_op = tf.group(tf.initialize_all_tables(), name='legacy_init_op')
builder.add_meta_graph_and_variables(sess,
[tag_constants.SERVING],
signature_def_map={'predict':prediction_signature,
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:prediction_signature},
legacy_init_op=legacy_init_op)
builder.save()
%%bash
ls -l /root/models/linear/cpu/[version]
from tensorflow.python.framework import graph_io
graph_io.write_graph(sess.graph, "/root/models/optimize_me/", "unoptimized_cpu.pb")
sess.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading and Preparing Data
Step3: Big Kudos to Waleed Abdulla for providing the initial idea and many of the functions used to prepare and display the images
Step4: Let's start with creating a minimal model that overfits on a very small training set
Step5: This is what overfitting looks like in the metrics
Step6: Hands-On
Step7: How the metrics might look when training for 500 epochs with the given full model
Step8: What images does it work well on?
|
<ASSISTANT_TASK:>
Python Code:
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
%pylab inline
import matplotlib.pylab as plt
import numpy as np
from distutils.version import StrictVersion
import sklearn
print(sklearn.__version__)
assert StrictVersion(sklearn.__version__ ) >= StrictVersion('0.18.1')
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
assert StrictVersion(tf.__version__) >= StrictVersion('1.1.0')
import keras
print(keras.__version__)
assert StrictVersion(keras.__version__) >= StrictVersion('2.0.6')
!ls -l speed-limit-signs
!cat speed-limit-signs/README.md
import os
import skimage.data
import skimage.transform
from keras.utils.np_utils import to_categorical
import numpy as np
def load_data(data_dir, type=".ppm"):
num_categories = 6
# Get all subdirectories of data_dir. Each represents a label.
directories = [d for d in os.listdir(data_dir)
if os.path.isdir(os.path.join(data_dir, d))]
# Loop through the label directories and collect the data in
# two lists, labels and images.
labels = []
images = []
for d in directories:
label_dir = os.path.join(data_dir, d)
file_names = [os.path.join(label_dir, f) for f in os.listdir(label_dir) if f.endswith(type)]
# For each label, load it's images and add them to the images list.
# And add the label number (i.e. directory name) to the labels list.
for f in file_names:
images.append(skimage.data.imread(f))
labels.append(int(d))
images64 = [skimage.transform.resize(image, (64, 64)) for image in images]
return images64, labels
# Load datasets.
ROOT_PATH = "./"
original_dir = os.path.join(ROOT_PATH, "speed-limit-signs")
images, labels = load_data(original_dir, type=".ppm")
import matplotlib
import matplotlib.pyplot as plt
def display_images_and_labels(images, labels):
    '''Display the first image of each label.'''
unique_labels = set(labels)
plt.figure(figsize=(15, 15))
i = 1
for label in unique_labels:
# Pick the first image for each label.
image = images[labels.index(label)]
plt.subplot(8, 8, i) # A grid of 8 rows x 8 columns
plt.axis('off')
plt.title("Label {0} ({1})".format(label, labels.count(label)))
i += 1
_ = plt.imshow(image)
display_images_and_labels(images, labels)
# again a little bit of feature engeneering
y = np.array(labels)
X = np.array(images)
from keras.utils.np_utils import to_categorical
num_categories = 6
y = to_categorical(y, num_categories)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.9, random_state=42, stratify=y)
X_train.shape, y_train.shape
# full architecture
# %load https://djcordhose.github.io/ai/fragments/vgg_style_no_dropout.py
# my sample minimized architecture
# %load https://djcordhose.github.io/ai/fragments/vgg_style_no_dropout_overfitting.py
model = Model(input=inputs, output=predictions)
model.summary()
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# Determines how many samples are using for training in one batch
# Depends on harware GPU architecture, set as high as possible (this works well on K80)
BATCH_SIZE = 500
%time model.fit(X_train, y_train, epochs=100, validation_split=0.2, batch_size=BATCH_SIZE)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
# https://keras.io/callbacks/#tensorboard
tb_callback = keras.callbacks.TensorBoard(log_dir='./tf_log')
# To start tensorboard
# tensorboard --logdir=/mnt/c/Users/olive/Development/ml/tf_log
# open http://localhost:6006
early_stopping_callback = keras.callbacks.EarlyStopping(monitor='val_loss', patience=50, verbose=1)
checkpoint_callback = keras.callbacks.ModelCheckpoint('./model-checkpoints/weights.epoch-{epoch:02d}-val_loss-{val_loss:.2f}.hdf5');
keras.layers.Dropout?
# full architecture with dropout
# %load https://djcordhose.github.io/ai/fragments/vgg_style_dropout.py
# my sample minimized architecture
# %load https://djcordhose.github.io/ai/fragments/vgg_style_dropout_minmal.py
model = Model(input=inputs, output=predictions)
model.summary()
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
!rm -r tf_log
%time model.fit(X_train, y_train, epochs=500, batch_size=BATCH_SIZE, validation_split=0.2, callbacks=[tb_callback, early_stopping_callback])
# %time model.fit(X_train, y_train, epochs=500, batch_size=BATCH_SIZE, validation_split=0.2, callbacks=[tb_callback])
# %time model.fit(X_train, y_train, epochs=500, batch_size=BATCH_SIZE, validation_split=0.2)
train_loss, train_accuracy = model.evaluate(X_train, y_train, batch_size=BATCH_SIZE)
train_loss, train_accuracy
test_loss, test_accuracy = model.evaluate(X_test, y_test, batch_size=BATCH_SIZE)
test_loss, test_accuracy
# model.save('conv-vgg.hdf5')
model.save('conv-simple.hdf5')
!ls -lh
# https://transfer.sh/
# Saved for 14 days
# !curl --upload-file conv-vgg.hdf5 https://transfer.sh
!curl --upload-file conv-simple.hdf5 https://transfer.sh
# pre-trained model
# acc: 0.98- val_acc: 0.89
# https://transfer.sh/DuZA7/conv-simple.hdf5
import random
# Pick 10 random images for test data set
random.seed(42) # to make this deterministic
sample_indexes = random.sample(range(len(X_test)), 10)
sample_images = [X_test[i] for i in sample_indexes]
sample_labels = [y_test[i] for i in sample_indexes]
ground_truth = np.argmax(sample_labels, axis=1)
ground_truth
X_sample = np.array(sample_images)
prediction = model.predict(X_sample)
predicted_categories = np.argmax(prediction, axis=1)
predicted_categories
# Display the predictions and the ground truth visually.
def display_prediction (images, true_labels, predicted_labels):
fig = plt.figure(figsize=(10, 10))
for i in range(len(true_labels)):
truth = true_labels[i]
prediction = predicted_labels[i]
plt.subplot(5, 2,1+i)
plt.axis('off')
color='green' if truth == prediction else 'red'
plt.text(80, 10, "Truth: {0}\nPrediction: {1}".format(truth, prediction),
fontsize=12, color=color)
plt.imshow(images[i])
display_prediction(sample_images, ground_truth, predicted_categories)
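# A further regularization idea (an addition, not in the original notebook):
# augment the training images on the fly, instead of or alongside dropout.
from keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(rotation_range=10, width_shift_range=0.1,
                             height_shift_range=0.1, zoom_range=0.1)
# model.fit_generator(datagen.flow(X_train, y_train, batch_size=BATCH_SIZE),
#                     steps_per_epoch=len(X_train) // BATCH_SIZE, epochs=100)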
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From a sample of the RMS Titanic data, we can see the various features present for each passenger on the ship
Step3: The very same sample of the RMS Titanic data now shows the Survived feature removed from the DataFrame. Note that data (the passenger data) and outcomes (the outcomes of survival) are now paired. That means for any passenger data.loc[i], they have the survival outcome outcomes[i].
Step5: Tip
Step6: Question 1
Step7: Answer
Step9: Examining the survival statistics, a large majority of males did not survive the ship sinking. However, a majority of females did survive the ship sinking. Let's build on our previous prediction
Step10: Question 2
Step11: Answer
Step13: Examining the survival statistics, the majority of males younger than 10 survived the ship sinking, whereas most males age 10 or older did not survive the ship sinking. Let's continue to build on our previous prediction
Step14: Question 3
Step15: Answer
Step17: After exploring the survival statistics visualization, fill in the missing code below so that the function will make your prediction.
Step18: Question 4
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from IPython.display import display # Allows the use of display() for DataFrames
# Import supplementary visualizations code visuals.py
import visuals as vs
# Pretty display for notebooks
%matplotlib inline
# Load the dataset
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# Print the first few entries of the RMS Titanic data
display(full_data.head())
# Store the 'Survived' feature in a new variable and remove it from the dataset
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis = 1)
# Show the new dataset with 'Survived' removed
display(data.head())
def accuracy_score(truth, pred):
    ''' Returns accuracy score for input truth and predictions. '''
# Ensure that the number of predictions matches number of outcomes
if len(truth) == len(pred):
# Calculate and return the accuracy as a percent
return "Predictions have an accuracy of {:.2f}%.".format((truth == pred).mean()*100)
else:
return "Number of predictions does not match number of outcomes!"
# Test the 'accuracy_score' function
predictions = pd.Series(np.ones(5, dtype = int))
print accuracy_score(outcomes[:5], predictions)
def predictions_0(data):
    ''' Model with no features. Always predicts a passenger did not survive. '''
predictions = []
for _, passenger in data.iterrows():
# Predict the survival of 'passenger'
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_0(data)
print accuracy_score(outcomes, predictions)
vs.survival_stats(data, outcomes, 'Sex')
def predictions_1(data):
    ''' Model with one feature:
            - Predict a passenger survived if they are female. '''
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
if passenger['Sex'] == 'female':
predictions.append(1)
else:
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_1(data)
print accuracy_score(outcomes, predictions)
vs.survival_stats(data, outcomes, 'Age', ["Sex == 'male'"])
def predictions_2(data):
    ''' Model with two features:
            - Predict a passenger survived if they are female.
            - Predict a passenger survived if they are male and younger than 10. '''
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
if passenger['Sex'] == 'female':
predictions.append(1)
elif passenger['Age'] < 10:
predictions.append(1)
else:
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_2(data)
print accuracy_score(outcomes, predictions)
vs.survival_stats(data, outcomes, 'Pclass', ["Age > 10", "Sex == 'female'"])
def predictions_3(data):
    ''' Model with multiple features. Makes a prediction with an accuracy of at least 80%. '''
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
if passenger['Sex'] == 'female' and passenger['Pclass'] < 3:
predictions.append(1)
elif passenger['Age'] < 6:
predictions.append(1)
else:
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_3(data)
print accuracy_score(outcomes, predictions)
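# Further exploration (an addition): the same visualization helper accepts any
# feature and filter list, e.g. siblings/spouses aboard for third-class females.
vs.survival_stats(data, outcomes, 'SibSp', ["Sex == 'female'", "Pclass == 3"])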
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Target Configuration
Step2: Tests Configuration
Step3: Tests execution
|
<ASSISTANT_TASK:>
Python Code:
import logging
from conf import LisaLogging
LisaLogging.setup()
# Execute this cell to enabled executor debugging statements
logging.getLogger('Executor').setLevel(logging.DEBUG)
from env import TestEnv
# Setup a test environment with target configuration
env = TestEnv({
# Target platform and board
"platform" : 'linux',
"board" : 'juno',
# Target board IP/MAC address
"host" : '192.168.0.1',
# Login credentials
"username" : 'root',
"password" : 'test0000',
# Folder where all the results will be collected
"results_dir" : "ExecutorExample",
# FTrace events to collect for all the tests configuration which have
# the "ftrace" flag enabled
"ftrace" : {
"events" : [
"sched_switch",
"sched_wakeup",
"sched_wakeup_new",
"cpu_frequency",
],
"buffsize" : 80 * 1024,
},
# Tools required by the experiments
"tools" : [ 'trace-cmd', 'perf' ],
# Modules required by these experiments
"modules" : [ 'bl', 'cpufreq' ],
})
my_tests_conf = {
# Platform configurations to test
"confs" : [
{
"tag" : "base",
"flags" : "ftrace", # Enable FTrace events
"sched_features" : "NO_ENERGY_AWARE", # Disable EAS
"cpufreq" : { # Use PERFORMANCE CpuFreq
"governor" : "performance",
},
},
{
"tag" : "eas",
"flags" : "ftrace", # Enable FTrace events
"sched_features" : "ENERGY_AWARE", # Enable EAS
"cpufreq" : { # Use PERFORMANCE CpuFreq
"governor" : "performance",
},
},
],
# Workloads to run (on each platform configuration)
"wloads" : {
# Run hackbench with 1 group using pipes
"perf" : {
"type" : "perf_bench",
"conf" : {
"class" : "messaging",
"params" : {
"group" : 1,
"loop" : 10,
"pipe" : True,
"thread": True,
}
}
},
# Run a 20% duty-cycle periodic task
"rta" : {
"type" : "rt-app",
"loadref" : "big",
"conf" : {
"class" : "profile",
"params" : {
"p20" : {
"kind" : "Periodic",
"params" : {
"duty_cycle_pct" : 20,
},
},
},
},
},
},
# Number of iterations for each workload
"iterations" : 1,
}
from executor import Executor
executor = Executor(env, my_tests_conf)
executor.run()
!tree {executor.te.res_dir}
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Implementation
Step2: Preparing the Data
Step3: Preprocess Feature Columns
Step4: Implementation
Step5: Training and Evaluating Models
Step6: Implementation
Step7: Tabular Results
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries
import numpy as np
import pandas as pd
from time import time
from sklearn.metrics import f1_score
# Read student data
student_data = pd.read_csv("student-data.csv")
print "Student data read successfully!"
# One possible completion of the data-exploration TODOs:
# Calculate number of students
n_students = student_data.shape[0]
# Calculate number of features (the last column, 'passed', is the target)
n_features = student_data.shape[1] - 1
# Calculate passing students
n_passed = (student_data['passed'] == 'yes').sum()
# Calculate failing students
n_failed = (student_data['passed'] == 'no').sum()
# Calculate graduation rate
grad_rate = 100.0 * n_passed / n_students
# Print the results
print "Total number of students: {}".format(n_students)
print "Number of features: {}".format(n_features)
print "Number of students who passed: {}".format(n_passed)
print "Number of students who failed: {}".format(n_failed)
print "Graduation rate of the class: {:.2f}%".format(grad_rate)
# Extract feature columns
feature_cols = list(student_data.columns[:-1])
# Extract target column 'passed'
target_col = student_data.columns[-1]
# Show the list of columns
print "Feature columns:\n{}".format(feature_cols)
print "\nTarget column: {}".format(target_col)
# Separate the data into feature data and target data (X_all and y_all, respectively)
X_all = student_data[feature_cols]
y_all = student_data[target_col]
# Show the feature information by printing the first five rows
print "\nFeature values:"
print X_all.head()
def preprocess_features(X):
''' Preprocesses the student data and converts non-numeric binary variables into
binary (0/1) variables. Converts categorical variables into dummy variables. '''
# Initialize new output DataFrame
output = pd.DataFrame(index = X.index)
# Investigate each feature column for the data
for col, col_data in X.iteritems():
# If data type is non-numeric, replace all yes/no values with 1/0
if col_data.dtype == object:
col_data = col_data.replace(['yes', 'no'], [1, 0])
# If data type is categorical, convert to dummy variables
if col_data.dtype == object:
# Example: 'school' => 'school_GP' and 'school_MS'
col_data = pd.get_dummies(col_data, prefix = col)
# Collect the revised columns
output = output.join(col_data)
return output
X_all = preprocess_features(X_all)
print "Processed feature columns ({} total features):\n{}".format(len(X_all.columns), list(X_all.columns))
# Import train_test_split (one possible choice of splitting utility)
from sklearn.model_selection import train_test_split
# Set the number of training points
num_train = 300
# Set the number of testing points
num_test = X_all.shape[0] - num_train
# Shuffle and split the dataset into the number of training and testing points above
X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, train_size=num_train,
                                                    random_state=42, stratify=y_all)
# Show the results of the split
print "Training set has {} samples.".format(X_train.shape[0])
print "Testing set has {} samples.".format(X_test.shape[0])
def train_classifier(clf, X_train, y_train):
''' Fits a classifier to the training data. '''
# Start the clock, train the classifier, then stop the clock
start = time()
clf.fit(X_train, y_train)
end = time()
# Print the results
print "Trained model in {:.4f} seconds".format(end - start)
def predict_labels(clf, features, target):
''' Makes predictions using a fit classifier based on F1 score. '''
# Start the clock, make predictions, then stop the clock
start = time()
y_pred = clf.predict(features)
end = time()
# Print and return results
print "Made predictions in {:.4f} seconds.".format(end - start)
return f1_score(target.values, y_pred, pos_label='yes')
def train_predict(clf, X_train, y_train, X_test, y_test):
''' Train and predict using a classifer based on F1 score. '''
# Indicate the classifier and the training set size
print "Training a {} using a training set size of {}. . .".format(clf.__class__.__name__, len(X_train))
# Train the classifier
train_classifier(clf, X_train, y_train)
# Print the results of prediction for both training and testing
print "F1 score for training set: {:.4f}.".format(predict_labels(clf, X_train, y_train))
print "F1 score for test set: {:.4f}.".format(predict_labels(clf, X_test, y_test))
# Import three supervised learning models from sklearn (one possible choice)
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
# Initialize the three models
clf_A = GaussianNB()
clf_B = DecisionTreeClassifier(random_state=42)
clf_C = SVC(random_state=42)
# Set up the training set sizes
X_train_100 = X_train[:100]
y_train_100 = y_train[:100]
X_train_200 = X_train[:200]
y_train_200 = y_train[:200]
X_train_300 = X_train[:300]
y_train_300 = y_train[:300]
# Execute the 'train_predict' function for each classifier and each training set size
for clf in [clf_A, clf_B, clf_C]:
    for X_tr, y_tr in [(X_train_100, y_train_100),
                       (X_train_200, y_train_200),
                       (X_train_300, y_train_300)]:
        train_predict(clf, X_tr, y_tr, X_test, y_test)
# Import 'GridSearchCV' and 'make_scorer'
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
# Create the parameters list you wish to tune (for the SVC chosen above)
parameters = {'C': [0.1, 1, 10, 100], 'gamma': [0.001, 0.01, 0.1, 1]}
# Initialize the classifier
clf = SVC(random_state=42)
# Make an f1 scoring function using 'make_scorer'
f1_scorer = make_scorer(f1_score, pos_label='yes')
# Perform grid search on the classifier using the f1_scorer as the scoring method
grid_obj = GridSearchCV(clf, parameters, scoring=f1_scorer)
# Fit the grid search object to the training data and find the optimal parameters
grid_obj = grid_obj.fit(X_train, y_train)
# Get the estimator
clf = grid_obj.best_estimator_
# Report the final F1 score for training and testing after parameter tuning
print "Tuned model has a training F1 score of {:.4f}.".format(predict_labels(clf, X_train, y_train))
print "Tuned model has a testing F1 score of {:.4f}.".format(predict_labels(clf, X_test, y_test))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Well, I have not found the one I wanted...I wanted from red to white to green
Step2: Using cmap_builder and test_cmap
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
from colormap import Colormap
c = Colormap()
cmap = c.cmap('cool')
# let us see what it looks like
c.test_colormap(cmap)
#Would be nice to plot a bunch of colormap to pick up one interesting
c.plot_colormap('diverging')
c.plot_colormap(c.misc)
c.plot_colormap(c.qualitative)
c.plot_colormap(c.sequentials)
c.plot_colormap(c.sequentials2)
# This list is implemented in colormap package itself
c.plot_colormap(c.diverging_black)
mycmap = c.cmap_linear('red', 'white', 'green(w3c)')
c.test_colormap(mycmap)
mycmap = c.cmap_bicolor('red', 'green(w3c)')
c.test_colormap(mycmap)
# there is also 2 extra maps from R
mycmap = c.get_cmap_heat()
c.test_colormap(mycmap)
# color can be given as a name available in
import colormap.xfree86 as colors
list(colors.XFree86_colors.keys())[0:5]
#or
list(colors.XFree86_colors.values())[0:5]
# or as RGB, HLS, HSV, YUX, Hexa format
from colormap import Color
co = Color('white')
co.hex
mycmap = c.cmap_linear('red', '#FFFFFF', 'green(w3c)')
c.test_colormap(mycmap)
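# A small usage sketch (added): any colormap built above can be passed to a
# standard matplotlib call via the cmap argument (np and plt come from %pylab).
data = np.random.rand(10, 10)
plt.imshow(data, cmap=mycmap, interpolation='nearest')
plt.colorbar()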
# Conversion between colors
c = Color('red')
c.rgb
c.hls
c.hex
print(c)
from colormap import cmap_builder, test_cmap
mycm = cmap_builder('red', 'white', 'green')
test_cmap(mycm)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Original Voce-Chaboche model
Step2: Tables can be easily generated following a standard format for several data sets by appending additional entries to the lists of values in material_def and to x_logs_all and data_all.
Step3: The value of $\xi_2 = 65\%$, indicating that the two sets of parameters are inconsistent for this data set.
|
<ASSISTANT_TASK:>
Python Code:
# First load RESSPyLab and necessary packages
import numpy as np
import RESSPyLab as rpl
# Identify the material
material_def = {'material_id': ['Example 1'], 'load_protocols': ['1,5']}
# Set the path to the x log file
x_log_file_1 = './output/x_log.txt'
x_logs_all = [x_log_file_1]
# Load the data
data_files_1 = ['example_1.csv']
data_1 = rpl.load_data_set(data_files_1)
data_all = [data_1]
# Make the tables
param_table, metric_table = rpl.summary_tables_maker_vc(material_def, x_logs_all, data_all)
# Load the base parameters, we want the last entry in the file
x_base = np.loadtxt(x_log_file_1, delimiter=' ')
x_base = x_base[-1]
# Load (or set) the sample parameters
x_sample = np.array([179750., 318.47, 100.72, 8.00, 11608.17, 145.22, 1026.33, 4.68])
# Calculate the metric
consistency_metric = rpl.vc_consistency_metric(x_base, x_sample, data_1)
print consistency_metric
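# Added, hedged: assuming the metric is returned as a fraction in [0, 1], it
# can be expressed as a percentage for direct comparison with the xi_2 = 65%
# value quoted in the text.
print 'xi_2 = {:.0f}%'.format(100. * consistency_metric)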
# Identify the material
material_def = {'material_id': ['Example 1'], 'load_protocols': ['1']}
# Set the path to the x log file
x_log_file_2 = './output/x_log_upd.txt'
x_logs_all = [x_log_file_2]
# Load the data
data_files_2 = ['example_1.csv']
data_2 = rpl.load_data_set(data_files_2)
data_all = [data_2]
# Make the tables
param_table, metric_table = rpl.summary_tables_maker_uvc(material_def, x_logs_all, data_all)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1.1 (10 pts) Use 5-fold cross validation, leave-one-out CV, and a 50% holdout to tune the bandwidth and ridge penalty parameter for the RBF kernel for SVMs. Qualitatively compare the results.
Step2: Exercise 1.2 (10 pts) Select one of these methods for tuning the bandwidth parameters and make this a function that takes in a dataset, performs cross-validation, and outputs the tuning parameters. Make a training and test set split and tune parameters for linear SVMs, RBF kernel SVMs, and logistic regression on the training set, then compare their performances on the test set using ROC and PR curves.
Step4: Reuters dataset
Step5: Exercise 2.1 (10 pts) Compute Kmeans with 8 clusters on the training set. For each cluster, what are the 10 terms that have the highest components in the cluster center vectors? What are the sizes of the clusters? Compute a 'confusion matrix' to compare these 8 clusters to the labelled topics (I want an 8 x 8 matrix of counts of documents in each topic and cluster - there are 8 topics).
Step6: The 10 terms with the highest components in each cluster center vector
Step7: Confusion matrix for these 8 clusters
Step8: Exercise 2.2 (10 pts) Reverse the roles of words and documents and perform hierarchical agglomerative clustering (you can just transpose X) up to 40 clusters. Compare the dendrograms for three different linkage types, and try to make sense of the resulting clusters.
Step9: 1000 most common words in the training set
Step10: Exercise 2.3 (10 pts) Use SVMs trained on the training set to predict the test set. Evaluate the performance with the ROC, PR, and confusion matrix. Don't forget to appropriately cross-validate to tune parameters.
Step11: classify earn
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from sklearn.preprocessing import scale
Dtrans = np.loadtxt("transfusion.data",dtype=np.str_,delimiter=",")
X = np.array(Dtrans[1:,0:4],dtype=float)
y = np.array(Dtrans[1:,4],dtype=float)
X = scale(X)
from sklearn import svm
import sklearn.linear_model as skl_lm
from sklearn.model_selection import KFold, cross_val_score
from sklearn.model_selection import LeaveOneOut
import time
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve, roc_curve,confusion_matrix, classification_report
%matplotlib inline
C = [0.1 ,0.5 , 1.0, 3.0, 10, 20 , 50 ,100 ]
methods = ['5-fold cross validation', 'leave-one-out CV', '50% holdout' ]
timelist = []
loo = LeaveOneOut()
kfold = KFold(n_splits=5)
idx = np.random.choice(np.arange(len(X)),374, replace=False)
test = list(set(range(len(X))) - set(idx))  # hold out the complement of the training indices
for method in methods:
print 'tune method is ',method
for i in C:
A = time.clock()
svc = svm.SVC(C=i)
if method == '5-fold cross validation':
print 'C =',i,' score =',cross_val_score(svc, X, y, cv=kfold).mean()
elif method == 'leave-one-out CV':
print 'C =',i,' score =',cross_val_score(svc, X, y, cv=loo).mean()
else:
print 'C =',i,' score =',svc.fit(X[idx], y[idx]).score(X[test], y[test])
B = time.clock()
timelist.append(B-A)
for i in range(3):
print 'avg running time of ',methods[i],' is ',np.mean(timelist[i*8:(i+1)*8]),' seconds'
xx = np.linspace(0,5,100)[1:]
five_fold = []
leaveone = []
holdout = []
for method in methods:
for i in xx:
svc = svm.SVC(gamma=i)
if method == '5-fold cross validation':
five_fold.append(cross_val_score(svc, X, y, cv=kfold).mean())
elif method == 'leave-one-out CV':
leaveone.append(cross_val_score(svc, X, y, cv=loo).mean())
else:
holdout.append(svc.fit(X[idx], y[idx]).score(X[test], y[test]))
plt.figure()
plt.plot(xx,five_fold,label=methods[0])
plt.plot(xx,leaveone,label=methods[1])
plt.plot(xx,holdout,label=methods[2])
plt.legend(loc='right' )
plt.xlabel('RBF bandwidth parameter (gamma)')
plt.ylabel('accuracy')
plt.title('relation between RBF bandwidth parameter (gamma) and accuracy')
plt.show()
print methods[0],'best accuracy:',max(five_fold),'best gamma:',xx[np.argmax(five_fold)]
print methods[1],'best accuracy:',max(leaveone),'best gamma:',xx[np.argmax(leaveone)]
print methods[2],'best accuracy:',max(holdout),'best gamma:',xx[np.argmax(holdout)]
def best_band_5fold(X,y,model,linear=False):
'''
    find the best value of the penalty parameter C via 5-fold cross-validation
    input: X, y, the model class used to fit, and whether to use a linear-kernel SVC
    output: the best value of C
'''
C = np.linspace(0.000001,31,100)
five_fold = []
kfold = KFold(n_splits=5)
for i in C:
svc = model(C=i)
if linear:
svc = model(C=i,kernel='linear')
five_fold.append(cross_val_score(svc, X, y, cv=kfold).mean())
return C[np.argmax(five_fold)]
print 'best C for RBF SVM',best_band_5fold(X,y,svm.SVC)
print 'best C for linear SVM',best_band_5fold(X,y,svm.SVC,linear=True)
print 'best C for logistic regression',best_band_5fold(X,y,skl_lm.LogisticRegression)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, random_state=0)
logit = skl_lm.LogisticRegression(solver='newton-cg', C=0.155779889447)
logit.fit(X_train,y_train)
prob = logit.predict_proba(X_test)
logfpr, logtpr, logthr = roc_curve(y_test, prob[:,1])
logpre, logrec, logthresh = precision_recall_curve(y_test, prob[:,1])
svc = svm.SVC(probability=True, C=29.1306533266)
svc.fit(X_train,y_train)
pred = svc.predict_proba(X_test)
svcfpr, svctpr, svcthr = roc_curve(y_test, pred[:,1])
svcpre, svcrec, svcthresh = precision_recall_curve(y_test, pred[:,1])
lsvm = svm.SVC(C=1e-06,kernel='linear',probability=True)
lsvm.fit(X_train,y_train)
pred = lsvm.predict_proba(X_test)
lsvmfpr, lsvmtpr, lsvmthr = roc_curve(y_test, pred[:,1])
lsvmpre, lsvmrec, lsvmthresh = precision_recall_curve(y_test, pred[:,1])
plt.figure()
lw = 2
plt.plot(logfpr,logtpr,lw=lw, label='logistic regression')
plt.plot(lsvmfpr,lsvmtpr,lw=lw, label='linear SVM')
plt.plot(svcfpr,svctpr,lw=lw, label='RBF SVM')
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic for three methods')
plt.legend(loc="lower right")
plt.show()
plt.figure()
lw = 2
plt.plot(logrec,logpre,lw=lw, label='logistic regression')
plt.plot(lsvmrec,lsvmpre,lw=lw, label='linear SVM')
plt.plot(svcrec,svcpre,lw=lw, label='RBF SVM')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision Recall curve for three methods')
plt.legend(loc=1)
plt.show()
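# Added: summarising each ROC curve by its area under the curve (AUC) gives a
# single-number comparison of the three classifiers.
from sklearn.metrics import auc
print 'logistic regression ROC AUC: %.3f' % auc(logfpr, logtpr)
print 'linear SVM ROC AUC: %.3f' % auc(lsvmfpr, lsvmtpr)
print 'RBF SVM ROC AUC: %.3f' % auc(svcfpr, svctpr)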
from collections import Counter
import scipy.sparse as sp
def read_corpus(filename):
    """
    Read a R8 corpus
    Input: filename
    Output: the corpus, which is a list of documents with topic labels and stemmed terms
    """
corpus = []
DF = Counter()
with open(filename,"r") as f:
for line in f:
label, terms = line.split('\t')
terms = terms.strip().split()
label = label.strip()
corpus.append([label,Counter(terms)])
DF += Counter(set(terms))
return(corpus, DF)
train_corpus, train_DF = read_corpus('r8-train-stemmed.txt')
test_corpus, test_DF = read_corpus('r8-test-stemmed.txt')
# You should use the document frequency for the whole corpus
full_DF = train_DF + test_DF
## dict_list is the list of dictionary terms,
## while dict_index can take you from the term back to its index
dict_list = full_DF.keys()
dict_index = {t:i for i,t in enumerate(dict_list)}
n, p = len(train_corpus), len(full_DF)
logN = np.log(len(train_corpus) + len(test_corpus))
X_tr = sp.lil_matrix((n,p))
## This creates the sparse training matrix, you will need to modify this to create the test matrix.
## I am using a normalized TFIDF transform below, figure out what it is doing.
for i in xrange(n):
tpc, doc = train_corpus[i]
    vec = np.array([(logN - np.log(full_DF[term]))*doc[term] for term in doc.keys()])  # corpus-wide DF, as noted above
vec = vec / np.linalg.norm(vec)
X_tr[i,[dict_index[term] for term in doc.keys()]] = vec
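# Added sanity check: each document vector was normalised to unit L2 norm
# above, and the weight (logN - log(DF)) is the classic IDF term.
print 'norm of first document vector (should be ~1):', np.linalg.norm(X_tr[0].toarray())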
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=8).fit(X_tr)
cluster_sizes = np.bincount(kmeans.labels_)  # number of documents per cluster
for k, cluster in enumerate(kmeans.cluster_centers_):
    print [name for name, idx in dict_index.items() if idx in np.argsort(cluster)[-10:]]
    print 'size of cluster', k, 'is', cluster_sizes[k]
y_label = {}
y = [i[0] for i in train_corpus]
for i,j in zip(set(y),range(8)):
y_label[i] = j
print(y_label)
y = [y_label[i] for i in y if i in y_label]
pred = kmeans.labels_  # reuse the fit above so the clusters match the centers analyzed
confusion_matrix(y, pred)
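# Added, hedged: k-means cluster ids are arbitrary, so the rows and columns of
# the confusion matrix above are only aligned up to a permutation. One way to
# find the best alignment is the Hungarian algorithm (requires scipy >= 0.17).
from scipy.optimize import linear_sum_assignment
cm = confusion_matrix(y, pred)
rows, cols = linear_sum_assignment(-cm)  # negate to maximise matched counts
print 'best topic -> cluster matching:', zip(rows, cols)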
from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import dendrogram, linkage
X_transp = X_tr.T
def draw_dendrogram(X_transp, num, method):
    '''
    draw the dendrogram for a given linkage type
    num is the cluster number (int), method is the linkage type (str)
'''
clusterer = AgglomerativeClustering(n_clusters=num, linkage=method)
MODEL1 = clusterer.fit(X_transp.toarray())
model1_list = MODEL1.labels_
cluster_dict = {}
for i in range(len(model1_list)):
if model1_list[i] not in cluster_dict:
cluster_dict[model1_list[i]] = [i]
else:
cluster_dict[model1_list[i]].append(i)
    center = []
    for i in range(num):
        center.append(np.mean(X_transp[cluster_dict[i],:], axis=0))
    center_array = np.array(center).reshape(num, -1)  # one row per cluster center
    Z = linkage(center_array, method=method)
plt.figure()
plt.title('dendrograms for '+ method+' linkage type')
dn = dendrogram(Z)
plt.show()
draw_dendrogram(X_transp, 40, 'ward')
draw_dendrogram(X_transp, 40, 'complete')
draw_dendrogram(X_transp, 40, 'average')
#1000 most common words in the training set instead
top_1000 = sorted(train_DF.items(), key=lambda x: x[1],reverse=True)[:1000]
top_1000_index = [dict_index[i[0]] for i in top_1000]
X_1000 = X_transp[top_1000_index]
Z = linkage(X_1000.toarray(), method='ward' )
plt.figure()
plt.title('1000 most common words dendrograms for ward linkage type')
dn = dendrogram(Z,p=40,truncate_mode='lastp')
Z = linkage(X_1000.toarray(), method='complete' )
plt.figure()
plt.title('1000 most common words dendrograms for complete linkage type')
dn = dendrogram(Z,p=40,truncate_mode='lastp')
Z = linkage(X_1000.toarray(), method='average' )
plt.figure()
plt.title('1000 most common words dendrograms for average linkage type')
dn = dendrogram(Z,p=40,truncate_mode='lastp')
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_curve, auc
from itertools import cycle
print 'best C:',best_band_5fold(X_tr,y,svm.SVC)
xx = np.linspace(0,5,10)[1:]
kfold = KFold(n_splits=5)
five_fold2 = []
for i in xx:
svc = svm.SVC(gamma=i)
    five_fold2.append(cross_val_score(svc, X_tr, y, cv=kfold).mean())
print 'best gamma (RBF bandwidth):',xx[np.argmax(five_fold2)]
#generate train matrix
m, p = len(test_corpus), len(full_DF)
logN = np.log(len(train_corpus) + len(test_corpus))
X_testtr = sp.lil_matrix((m,p))
for i in xrange(m):
tpc, doc = test_corpus[i]
    vec = np.array([(logN - np.log(full_DF[term]))*doc[term] for term in doc.keys()])  # corpus-wide DF, as noted above
vec = vec / np.linalg.norm(vec)
X_testtr[i,[dict_index[term] for term in doc.keys()]] = vec
y_label = {}
y_train = [i[0] for i in train_corpus]
y_test = [i[0] for i in test_corpus]
for i,j in zip(set(y_train),range(8)):
y_label[i] = j
y_train = [y_label[i] for i in y_train if i in y_label]
y_test = [y_label[i] for i in y_test if i in y_label]
y_train = label_binarize(y_train, classes=[0,1,2,3,4,5,6,7])
y_test = label_binarize(y_test, classes=[0,1,2,3,4,5,6,7])
n_classes = y_train.shape[1]
random_state = np.random.RandomState(0)
classifier = OneVsRestClassifier(svm.SVC(gamma=0.555555555556,C=1, probability=True,random_state=random_state))
y_score = classifier.fit(X_tr, y_train).decision_function(X_testtr)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
lw = 3
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure(figsize=(15,10))
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=6)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=6)
colors = cycle(['black','silver','red','gold','blue','moccasin','darkorange','cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate',fontsize=20)
plt.ylabel('True Positive Rate',fontsize=20)
plt.title('Receiver operating characteristic to multi-class',fontsize=20)
plt.legend(loc=4)
plt.show()
from sklearn.metrics import average_precision_score
# y_train and y_test were already binarized above; re-binarizing the 2-D
# binary matrices would fail, so they are reused directly here.
n_classes = y_train.shape[1]
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(gamma=0.555555555556,C=1e-06, probability=True,random_state=random_state))
y_score = classifier.fit(X_tr, y_train).decision_function(X_testtr)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average ROC curve and ROC area
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve for each class
plt.figure(figsize=(15,10))
plt.plot(recall["micro"], precision["micro"], color='gold', lw=lw,
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i, color in zip(range(n_classes), colors):
plt.plot(recall[i], precision[i], color=color, lw=lw,
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall',fontsize=20)
plt.ylabel('Precision',fontsize=20)
plt.title('Extension of Precision-Recall curve to multi-class',fontsize=20)
plt.legend(loc="lower left")
plt.show()
y_test = [i[0] for i in test_corpus]
y_test = [y_label[i] for i in y_test if i in y_label]
svc = svm.SVC(C=1.0, gamma=0.555555555556)
svc.fit(X_tr,y)
predy = svc.predict(X_testtr)
print 'RBF Kernel'
confusion_matrix(y_test, predy)
svc = svm.SVC(C=1.0, gamma=0.555555555556,kernel='linear')
svc.fit(X_tr,y)
predy = svc.predict(X_testtr)
print 'Linear Kernel'
confusion_matrix(y_test, predy)
y_label = {}
y_train = [i[0] for i in train_corpus]
y_test = [i[0] for i in test_corpus]
for i,j in zip(set(y_train),range(8)):
y_label[i] = j
y_train = [y_label[i] for i in y_train if i in y_label]
y_test = [y_label[i] for i in y_test if i in y_label]
y_train = [1 if i==0 else 0 for i in y_train]
y_test = [1 if i==0 else 0 for i in y_test]
svc = svm.SVC( C=1.0,gamma=0.555555555556,probability=True)
lsvc = svm.SVC(C=1.0, probability=True,kernel='linear')
svc.fit(X_tr,y_train)
lsvc.fit(X_tr,y_train)
pred = svc.predict_proba(X_testtr)
lpred = lsvc.predict_proba(X_testtr)
svcfpr, svctpr, svcthr = roc_curve(y_test, pred[:,1])
svcpre, svcrec, svcthresh = precision_recall_curve(y_test, pred[:,1])
lsvcfpr, lsvctpr, lsvcthr = roc_curve(y_test, lpred[:,1])
lsvcpre, lsvcrec, lsvcthresh = precision_recall_curve(y_test, lpred[:,1])
plt.figure()
lw = 2
plt.plot(svcfpr,svctpr,lw=lw, label='RBF SVM')
plt.plot(lsvcfpr,lsvctpr,lw=lw, label='Linear SVM')
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic for earn class')
plt.legend(loc="lower right")
plt.show()
plt.figure()
lw = 2
plt.plot(svcrec,svcpre,lw=lw, label='RBF SVM')
plt.plot(lsvcrec,lsvcpre,lw=lw, label='Linear SVM')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision Recall curve for earn class')
plt.legend(loc=4)
plt.show()
svc = svm.SVC( gamma=0.555555555556,probability=True)
svc.fit(X_tr,y_train)
predyy = svc.predict(X_testtr)
print('rbf svc')
confusion_matrix(y_test, predyy)
lsvc = svm.SVC(kernel='linear')
lsvc.fit(X_tr,y_train)
predyy = lsvc.predict(X_testtr)
print('linear svc')
confusion_matrix(y_test, predyy)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Split this string
Step2: Given the variables
Step3: Given this nested list, use indexing to grab the word "hello"
Step4: Given this nested dictionary grab the word "hello". Be prepared, this will be annoying/tricky
Step5: What is the main difference between a tuple and a list?
Step6: Create a function that grabs the email website domain from a string in the form
Step7: Create a basic function that returns True if the word 'dog' is contained in the input string. Don't worry about edge cases like a punctuation being attached to the word dog, but do account for capitalization.
Step8: Create a function that counts the number of times the word "dog" occurs in a string. Again ignore edge cases.
Step9: Use lambda expressions and the filter() function to filter out words from a list that don't start with the letter 's'. For example
Step10: Final Problem
Step11: Great job!
|
<ASSISTANT_TASK:>
Python Code:
7**4
s = "Hi there Sam!"
s.split()
planet = "Earth"
diameter = 12742
print("The diameter of {} is {} kilometers.".format(planet,diameter))
lst = [1,2,[3,4],[5,[100,200,['hello']],23,11],1,7]
lst[3][1][2][0]
d = {'k1':[1,2,3,{'tricky':['oh','man','inception',{'target':[1,2,3,'hello']}]}]}
d['k1'][3]['tricky'][3]['target'][3]
# Tuple is immutable
na = "user@domain.com"
na.split("@")[1]
def domainGet(name):
return name.split("@")[1]
domainGet('user@domain.com')
def findDog(sentence):
    # lower-case the words so capitalization is accounted for
    for item in sentence.lower().split():
        if item == "dog":
            return True
    return False
findDog('Is there a dog here?')
def countDog(sentence):
    # count case-insensitive occurrences of the word "dog"
    count = 0
    for word in sentence.lower().split():
        if word == 'dog':
            count += 1
    return count
countDog('This dog runs faster than the other dog dude!')
seq = ['soup','dog','salad','cat','great']
list(filter(lambda word: word[0]=='s', seq))
def caught_speeding(speed, is_birthday):
    # standard behavior for this exercise: a birthday grants a 5 mph allowance
    if is_birthday:
        speed -= 5
    if speed <= 60:
        return "No Ticket"
    elif speed <= 80:
        return "Small Ticket"
    else:
        return "Big Ticket"
caught_speeding(81,True)
caught_speeding(81,False)
lst = ["7:00","7:30"]
lst
type(lst)
type(lst[1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2D trajectory interpolation
Step2: Use these arrays to create interpolated functions $x(t)$ and $y(t)$. Then use those functions to create the following arrays
Step3: Make a parametric plot of ${x(t),y(t)}$ that shows the interpolated values and the original points
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy.interpolate import interp1d, interp2d
f = np.load('trajectory.npz')
x = f['x']
y = f['y']
t = f['t']
assert isinstance(x, np.ndarray) and len(x)==40
assert isinstance(y, np.ndarray) and len(y)==40
assert isinstance(t, np.ndarray) and len(t)==40
x_approx = interp1d(t, x, kind='cubic')
y_approx = interp1d(t, y, kind='cubic')
traj_approx = interp2d(x, y, t, kind='cubic')
newt = np.linspace(t.min(), t.max(), 200)
newx = x_approx(newt)
newy = y_approx(newt)
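# Added sanity check: cubic interp1d passes exactly through the sample points,
# so evaluating at the original times should reproduce x and y to round-off.
print('max abs error at samples: x={:.2e}, y={:.2e}'.format(
    np.max(np.abs(x_approx(t) - x)), np.max(np.abs(y_approx(t) - y))))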
assert newt[0]==t.min()
assert newt[-1]==t.max()
assert len(newt)==200
assert len(newx)==200
assert len(newy)==200
fig = plt.figure(figsize=(7,7))
plt.plot(newx, newy, marker='.')
plt.plot(x, y, 'ro')
plt.xticks([-1.0,-0.5,0.0,0.5,1.0])
plt.yticks([-1.0,-0.5,0.0,0.5,1.0])
plt.xlabel('x(t)')
plt.ylabel('y(t)')
assert True # leave this to grade the trajectory plot
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Synthetic data creation
Step2: Unobserved components (frequency domain modeling)
Step3: Observe that the fitted variances are pretty close to the true variances of 4 and 9. Further, the individual seasonal components look pretty close to the true seasonal components. The smoothed level term is reasonably close to the true level of 10. Finally, our diagnostics look solid; the test statistics are small enough that we fail to reject the null hypothesis in all three tests.
Step4: The plotted components look good. However, the estimated variance of the second seasonal term is inflated from reality. Additionally, we reject the Ljung-Box statistic, indicating we may have remaining autocorrelation after accounting for our components.
Step5: Note that one of our diagnostic tests would be rejected at the .05 level.
Step6: The seasonal component itself looks good--it is the primary signal. The estimated variance of the seasonal term is very high ($>10^5$), leading to a lot of uncertainty in our one-step-ahead predictions and slow responsiveness to new data, as evidenced by large errors in one-step-ahead predictions and observations. Finally, all three of our diagnostic tests were rejected.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
# First we'll simulate the synthetic data
def simulate_seasonal_term(periodicity, total_cycles, noise_std=1.,
harmonics=None):
duration = periodicity * total_cycles
assert duration == int(duration)
duration = int(duration)
harmonics = harmonics if harmonics else int(np.floor(periodicity / 2))
lambda_p = 2 * np.pi / float(periodicity)
gamma_jt = noise_std * np.random.randn((harmonics))
gamma_star_jt = noise_std * np.random.randn((harmonics))
total_timesteps = 100 * duration # Pad for burn in
series = np.zeros(total_timesteps)
for t in range(total_timesteps):
gamma_jtp1 = np.zeros_like(gamma_jt)
gamma_star_jtp1 = np.zeros_like(gamma_star_jt)
for j in range(1, harmonics + 1):
cos_j = np.cos(lambda_p * j)
sin_j = np.sin(lambda_p * j)
gamma_jtp1[j - 1] = (gamma_jt[j - 1] * cos_j
+ gamma_star_jt[j - 1] * sin_j
+ noise_std * np.random.randn())
gamma_star_jtp1[j - 1] = (- gamma_jt[j - 1] * sin_j
+ gamma_star_jt[j - 1] * cos_j
+ noise_std * np.random.randn())
series[t] = np.sum(gamma_jtp1)
gamma_jt = gamma_jtp1
gamma_star_jt = gamma_star_jtp1
wanted_series = series[-duration:] # Discard burn in
return wanted_series
duration = 100 * 3
periodicities = [10, 100]
num_harmonics = [3, 2]
std = np.array([2, 3])
np.random.seed(8678309)
terms = []
for ix, _ in enumerate(periodicities):
s = simulate_seasonal_term(
periodicities[ix],
duration / periodicities[ix],
harmonics=num_harmonics[ix],
noise_std=std[ix])
terms.append(s)
terms.append(np.ones_like(terms[0]) * 10.)
series = pd.Series(np.sum(terms, axis=0))
df = pd.DataFrame(data={'total': series,
'10(3)': terms[0],
'100(2)': terms[1],
'level':terms[2]})
h1, = plt.plot(df['total'])
h2, = plt.plot(df['10(3)'])
h3, = plt.plot(df['100(2)'])
h4, = plt.plot(df['level'])
plt.legend(['total','10(3)','100(2)', 'level'])
plt.show()
model = sm.tsa.UnobservedComponents(series.values,
level='fixed intercept',
freq_seasonal=[{'period': 10,
'harmonics': 3},
{'period': 100,
'harmonics': 2}])
res_f = model.fit(disp=False)
print(res_f.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_f.smoother_results.smoothed_state[0,-1:][0]))
res_f.plot_components()
plt.show()
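# Added: the fitted variance parameters can also be inspected directly and
# compared with the true values of 4 (std 2) and 9 (std 3) from the simulation.
print(res_f.params)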
model.ssm.transition[:, :, 0]
model = sm.tsa.UnobservedComponents(series,
level='fixed intercept',
seasonal=10,
freq_seasonal=[{'period': 100,
'harmonics': 2}])
res_tf = model.fit()
print(res_tf.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_tf.smoother_results.smoothed_state[0,-1:][0]))
res_tf.plot_components()
plt.show()
model = sm.tsa.UnobservedComponents(series,
level='fixed intercept',
freq_seasonal=[{'period': 100}])
res_lf = model.fit()
print(res_lf.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_lf.smoother_results.smoothed_state[0,-1:][0]))
res_lf.plot_components()
plt.show()
model = sm.tsa.UnobservedComponents(series,
level='fixed intercept',
seasonal=100)
res_lt = model.fit(disp=False)
print(res_lt.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_lt.smoother_results.smoothed_state[0,-1:][0]))
res_lt.plot_components()
plt.show()
# Assign better names for our seasonal terms
true_seasonal_10_3 = terms[0]
true_seasonal_100_2 = terms[1]
true_sum = true_seasonal_10_3 + true_seasonal_100_2
time_s = np.s_[:50] # After this they basically agree
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
h1, = ax1.plot(series.index[time_s], res_f.freq_seasonal[0].filtered[time_s], label='Double Freq. Seas')
h2, = ax1.plot(series.index[time_s], res_tf.seasonal.filtered[time_s], label='Mixed Domain Seas')
h3, = ax1.plot(series.index[time_s], true_seasonal_10_3[time_s], label='True Seasonal 10(3)')
plt.legend([h1, h2, h3], ['Double Freq. Seasonal','Mixed Domain Seasonal','Truth'], loc=2)
plt.title('Seasonal 10(3) component')
plt.show()
time_s = np.s_[:50] # After this they basically agree
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
h21, = ax2.plot(series.index[time_s], res_f.freq_seasonal[1].filtered[time_s], label='Double Freq. Seas')
h22, = ax2.plot(series.index[time_s], res_tf.freq_seasonal[0].filtered[time_s], label='Mixed Domain Seas')
h23, = ax2.plot(series.index[time_s], true_seasonal_100_2[time_s], label='True Seasonal 100(2)')
plt.legend([h21, h22, h23], ['Double Freq. Seasonal','Mixed Domain Seasonal','Truth'], loc=2)
plt.title('Seasonal 100(2) component')
plt.show()
time_s = np.s_[:100]
fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
h31, = ax3.plot(series.index[time_s], res_f.freq_seasonal[1].filtered[time_s] + res_f.freq_seasonal[0].filtered[time_s], label='Double Freq. Seas')
h32, = ax3.plot(series.index[time_s], res_tf.freq_seasonal[0].filtered[time_s] + res_tf.seasonal.filtered[time_s], label='Mixed Domain Seas')
h33, = ax3.plot(series.index[time_s], true_sum[time_s], label='True sum of seasonal components')
h34, = ax3.plot(series.index[time_s], res_lf.freq_seasonal[0].filtered[time_s], label='Lazy Freq. Seas')
h35, = ax3.plot(series.index[time_s], res_lt.seasonal.filtered[time_s], label='Lazy Time Seas')
plt.legend([h31, h32, h33, h34, h35], ['Double Freq. Seasonal','Mixed Domain Seasonal','Truth', 'Lazy Freq. Seas', 'Lazy Time Seas'], loc=1)
plt.title('Seasonal components combined')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import section specific modules
Step5: 2.11 Least-squares Minimization
Step6: The three functions defined above will be used frequently during the Levenberg-Marquardt solution procedure. The following few lines of code just set up the values we need to call the Levenberg-Marquardt solver.
Step8: The following plots show the observed data and the curve corresponding to our initial guess for the parameters.
Step9: The above is the main function of the Levenberg-Marquardt algorithm. The code may appear daunting at first, but all it does is implement the Levenberg-Marquardt update rule and some checks of convergence. We can now apply it to the problem with relative ease to obtain a numerical solution for our parameter vector.
Step10: We can now compare our numerical result with both the truth and the data. The following plot shows the various quantities of interest.
Step11: The fitted values are so close to the true values that it is almost impossible to differentiate between the red and green lines in the above plot. The true values have been omitted from the following plot to make it clearer that the numerical solution does an excellent job of arriving at the correct parameter values.
Step12: A final, important thing to note is that the Levenberg-Marquardt algorithm is already implemented in Python. It is used in scipy.optimize.leastsq. This is often useful for doing rapid numerical solution without the need for an analytic Jacobian. As a simple proof, we can call the built-in method to verify our results.
Step13: In this case, the built-in method clearly fails. I have done this deliberately to illustrate a point - a given implementation of an algorithm might not be the best one for your application. In this case, the manner in which the tuning parameters are handled prevents the solution from converging correctly. This can be avoided by choosing a starting guess closer to the truth and once again highlights the importance of initial values in problems of this type.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
from scipy.optimize import leastsq
plt.rcParams['figure.figsize'] = (18, 6)
from IPython.display import HTML
HTML('../style/code_toggle.html')
def sinusoid(x, t):
    """
    Returns a vector containing the values of a sinusoid with parameters x evaluated at points t.
    INPUTS:
    t    Value of independent variable at the sampled points.
    x    Vector of parameters.
    """
x1 = x[0] #Amplitude
x2 = x[1] #Frequency
x3 = x[2] #Phase-shift
return x1*np.sin(2*np.pi*x2*t + x3)
def sinusoid_jacobian(x, t):
    """
    Returns the Jacobian corresponding to the function defined in sinusoid.
    INPUTS:
    t    Value of independent variable at the sampled points.
    x    Vector of parameters.
    """
x1 = x[0] #Amplitude
x2 = x[1] #Frequency
x3 = x[2] #Phase-shift
jacobian = np.empty([t.shape[0], x.shape[0]])
jacobian[:,0] = np.sin(2*np.pi*x2*t + x3)
jacobian[:,1] = 2*np.pi*t*x1*np.cos(2*np.pi*x2*t + x3)
jacobian[:,2] = x1*np.cos(2*np.pi*x2*t + x3)
return jacobian
def sinusoid_residual(x, t, d):
    """
    Returns a vector containing the residual values.
    INPUTS:
    d    Vector of measured values.
    t    Value of independent variable at the sampled points.
    x    Vector of parameters.
    """
return d - sinusoid(x, t)
t = np.arange(-0.06, 0.06, 0.06/300) #The points at which we will be taking our "measurements"
noise = 2*np.random.normal(size=(t.shape[0])) #A noise vector which we will use to manufacture "real" measurements.
true_x = np.array([10., 33.3, 0.52]) #The true values of our parameter vector.
x = np.array([8., 43.5, 1.05]) #Initial guess of parameter vector for our solver.
d = sinusoid(true_x, t) + noise #Our "observed" data, contructed from our true parameter values and the noise vector.
m = sinusoid(x, t) #Our fitted function using the initial guess parameters.
plt.plot(t, d)
plt.plot(t, m)
plt.show()
def levenberg_marquardt(d, t, x, r_func, j_func, maxit=100, lamda=1, K=10, eps1=1e-6, eps2=1e-6):
    """
    Returns a vector containing the optimal parameter values found by the algorithm.
    INPUTS:
    d       Vector of measured values.
    t       Value of independent variable at the sampled points.
    x       Vector of parameters.
    r_func  Function which generates the residual vector.
    j_func  Function which generates the Jacobian.
    maxit   Maximum number of iterations.
    lamda   Initial value of tuning parameter.
    K       Initial value of retuning factor.
    eps1    First tolerance parameter - triggers when the residual norm is below this number.
    eps2    Second tolerance parameter - triggers when relative changes to the parameter
            vector are below this number.
    """
#Initialises some important values and stores the original lamda value.
r = r_func(x, t, d)
old_chi = np.linalg.norm(r)
olamda = lamda
it = 0
while True:
#Heavy lifting portion of the algorithm. Computes the parameter update.
#This is just the implementation of the mathmatical update rule.
J = j_func(x, t)
JT = J.T
JTJ = JT.dot(J)
JTJdiag = np.eye(JTJ.shape[0])*JTJ
JTJinv = np.linalg.pinv(JTJ + lamda*JTJdiag)
JTr = JT.dot(r)
delta_x = JTJinv.dot(JTr)
x += delta_x
        #Convergence tests. If a solution has been found, returns the result.
        #The chi value is the norm of the residual. If the chi value is
        #sufficiently small, the function terminates. The second test checks
        #whether the parameter update has become negligibly small relative to
        #the parameter vector, and terminates if so.
r = r_func(x, t, d)
new_chi = np.linalg.norm(r)
if new_chi < eps1:
return x
elif np.linalg.norm(delta_x) < eps2*(np.linalg.norm(x) + eps2):
return x
#Tuning stage. If the parameter update was good, continue and restore lamda.
#If the update was bad, scale lamda by K and revert last update.
if new_chi > old_chi:
x -= delta_x
lamda = lamda*K
else:
old_chi = new_chi
lamda = olamda
#If the number of iterations grows too large, return the last value of x.
it += 1
if it >= maxit:
return x
solved_x = levenberg_marquardt(d, t, x, sinusoid_residual, sinusoid_jacobian)
print(solved_x)
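# Added: the norm of the final residual gives a quantitative measure of fit
# quality to accompany the recovered parameter values.
print(np.linalg.norm(sinusoid_residual(solved_x, t, d)))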
plt.plot(t, d, label="Data")
plt.plot(t, sinusoid(solved_x, t), label="LM")
plt.plot(t, sinusoid(true_x, t), label="Truth")
plt.xlabel("t")
plt.legend(loc='upper right')
plt.show()
plt.plot(t, d, label="Data")
plt.plot(t, sinusoid(solved_x, t), label="LM")
plt.xlabel("t")
plt.legend(loc='upper right')
plt.show()
x = np.array([8., 43.5, 1.05])
leastsq_x = leastsq(sinusoid_residual, x, args=(t, d))
print("scipy.optimize.leastsq: ", leastsq_x[0])
print("Our LM: ", solved_x)
plt.plot(t, d, label="Data")
plt.plot(t, sinusoid(leastsq_x[0], t), label="optimize.leastsq")
plt.xlabel("t")
plt.legend(loc='upper right')
plt.show()
x = np.array([8., 35., 1.05])
leastsq_x = leastsq(sinusoid_residual, x, args=(t, d))
print("scipy.optimize.leastsq: ", leastsq_x[0])
print("Our LM: ", solved_x)
plt.plot(t, d, label="Data")
plt.plot(t, sinusoid(leastsq_x[0], t), label="optimize.leastsq")
plt.xlabel("t")
plt.legend(loc='upper right')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: The first function we will use is aop_h5refl2array. This function is loaded into the cell below; we encourage you to look through the code to understand what it is doing -- most of these steps should look familiar to you from the first lesson. This function can be thought of as a wrapper to automate the steps required to read AOP hdf5 reflectance tiles into a Python format. This function also cleans the data by applying the no-data value and the reflectance scale factor.
Step3: If you forget what this function does, or don't want to scroll up to read the docstrings, remember you can use help or ? to display the associated docstrings.
Step4: Now that we have an idea of how this function works, let's try it out. First, define the path where the reflectance data is stored and use os.path.join to create the full path to the data file. Note that if you want to run this notebook later on a different reflectance tile, you just have to change this variable.
Step5: Now that we've specified our reflectance tile, we can call aop_h5refl2array to read in the reflectance tile as a python array called sercRefl, and the associated metadata into a dictionary sercMetadata
Step6: We can use the shape method to see the dimensions of the array we read in. NEON tiles are (1000 x 1000 x # of bands), the number of bands may vary depending on the hyperspectral sensor used, but should be in the vicinity of 426.
Step7: plot_aop_refl
Step8: Now that we have loaded this function, let's extract a single band from the SERC reflectance array and plot it
Step9: RGB Plots - Band Stacking
Step10: First, we will look at red, green, and blue bands, whose indices are defined below. To confirm that these band indices correspond to wavelengths in the expected portion of the spectrum, we can print out the wavelength values stored in metadata['wavelength']
Step11: Below we use stack_rgb to create an RGB array. Check that the dimensions of this array are as expected.
Step12: plot_aop_refl
Step13: You'll notice that this image is very dark; it is possible to make out some of the features (roads, buildings), but it is not ideal. Since colorlimits don't apply to 3-band images, we have to use some other image processing tools to enhance the visibility of this image.
Step14: False Color Image - Color Infrared (CIR)
Step15: Demo
Step16: Demo
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import h5py, os, osr, copy
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
def aop_h5refl2array(refl_filename):
    """
    aop_h5refl2array reads in a NEON AOP reflectance hdf5 file and returns
1. reflectance array (with the no data value and reflectance scale factor applied)
2. dictionary of metadata including spatial information, and wavelengths of the bands
--------
Parameters
refl_filename -- full or relative path and name of reflectance hdf5 file
--------
Returns
--------
reflArray:
array of reflectance values
metadata:
dictionary containing the following metadata:
bad_band_window1 (tuple)
bad_band_window2 (tuple)
bands: # of bands (float)
data ignore value: value corresponding to no data (float)
epsg: coordinate system code (float)
map info: coordinate system, datum & ellipsoid, pixel dimensions, and origin coordinates (string)
reflectance scale factor: factor by which reflectance is scaled (float)
wavelength: wavelength values (float)
wavelength unit: 'm' (string)
--------
NOTE: This function applies to the NEON hdf5 format implemented in 2016, and should be used for
data acquired 2016 and after. Data in earlier NEON hdf5 format (collected prior to 2016) is
expected to be re-processed after the 2018 flight season.
--------
Example Execution:
--------
    sercRefl, sercRefl_metadata = aop_h5refl2array('NEON_D02_SERC_DP3_368000_4306000_reflectance.h5')
    """
import h5py
#Read in reflectance hdf5 file
hdf5_file = h5py.File(refl_filename,'r')
#Get the site name
file_attrs_string = str(list(hdf5_file.items()))
file_attrs_string_split = file_attrs_string.split("'")
sitename = file_attrs_string_split[1]
#Extract the reflectance & wavelength datasets
refl = hdf5_file[sitename]['Reflectance']
reflData = refl['Reflectance_Data']
reflRaw = refl['Reflectance_Data'].value
#Create dictionary containing relevant metadata information
metadata = {}
metadata['map info'] = refl['Metadata']['Coordinate_System']['Map_Info'].value
metadata['wavelength'] = refl['Metadata']['Spectral_Data']['Wavelength'].value
#Extract no data value & scale factor
metadata['data ignore value'] = float(reflData.attrs['Data_Ignore_Value'])
metadata['reflectance scale factor'] = float(reflData.attrs['Scale_Factor'])
#metadata['interleave'] = reflData.attrs['Interleave']
#Apply no data value
reflClean = reflRaw.astype(float)
arr_size = reflClean.shape
if metadata['data ignore value'] in reflRaw:
print('% No Data: ',np.round(np.count_nonzero(reflClean==metadata['data ignore value'])*100/(arr_size[0]*arr_size[1]*arr_size[2]),1))
nodata_ind = np.where(reflClean==metadata['data ignore value'])
reflClean[nodata_ind]=np.nan
#Apply scale factor
reflArray = reflClean/metadata['reflectance scale factor']
#Extract spatial extent from attributes
metadata['spatial extent'] = reflData.attrs['Spatial_Extent_meters']
#Extract bad band windows
metadata['bad band window1'] = (refl.attrs['Band_Window_1_Nanometers'])
metadata['bad band window2'] = (refl.attrs['Band_Window_2_Nanometers'])
#Extract projection information
#metadata['projection'] = refl['Metadata']['Coordinate_System']['Proj4'].value
metadata['epsg'] = int(refl['Metadata']['Coordinate_System']['EPSG Code'].value)
#Extract map information: spatial extent & resolution (pixel size)
mapInfo = refl['Metadata']['Coordinate_System']['Map_Info'].value
hdf5_file.close
return reflArray, metadata
help(aop_h5refl2array)
aop_h5refl2array?
serc_h5_tile = ('../data/NEON_D02_SERC_DP3_368000_4306000_reflectance.h5')
sercRefl,sercMetadata = aop_h5refl2array(serc_h5_tile)
sercRefl.shape
def plot_aop_refl(band_array,refl_extent,colorlimit=(0,1),ax=plt.gca(),title='',cbar ='on',cmap_title='',colormap='Greys'):
    '''plot_aop_refl reads in and plots a single band or 3 stacked bands of a reflectance array
--------
Parameters
--------
band_array: array of reflectance values, created from aop_h5refl2array
refl_extent: extent of reflectance data to be plotted (xMin, xMax, yMin, yMax)
use metadata['spatial extent'] from aop_h5refl2array function
colorlimit: optional, range of values to plot (min,max).
- helpful to look at the histogram of reflectance values before plotting to determine colorlimit.
ax: optional, default = current axis
title: optional; plot title (string)
cmap_title: optional; colorbar title
colormap: optional (string, see https://matplotlib.org/examples/color/colormaps_reference.html) for list of colormaps
--------
Returns
--------
plots flightline array of single band of reflectance data
--------
Examples:
--------
plot_aop_refl(sercb56,
sercMetadata['spatial extent'],
colorlimit=(0,0.3),
title='SERC Band 56 Reflectance',
cmap_title='Reflectance',
colormap='Greys_r') '''
import matplotlib.pyplot as plt
plot = plt.imshow(band_array,extent=refl_extent,clim=colorlimit);
if cbar == 'on':
cbar = plt.colorbar(plot,aspect=40); plt.set_cmap(colormap);
cbar.set_label(cmap_title,rotation=90,labelpad=20)
plt.title(title); ax = plt.gca();
ax.ticklabel_format(useOffset=False, style='plain'); #do not use scientific notation for ticklabels
rotatexlabels = plt.setp(ax.get_xticklabels(),rotation=90); #rotate x tick labels 90 degrees
sercb56 = sercRefl[:,:,55]
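# Added: a quick histogram of the band values helps choose sensible color
# limits for plot_aop_refl (NaNs from the no-data mask are excluded).
plt.hist(sercb56[~np.isnan(sercb56)], 50)
plt.title('Band 56 reflectance histogram')
plt.show()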
plot_aop_refl(sercb56,
sercMetadata['spatial extent'],
colorlimit=(0,0.3),
title='SERC Band 56 Reflectance',
cmap_title='Reflectance',
colormap='Greys_r')
def stack_rgb(reflArray,bands):
import numpy as np
red = reflArray[:,:,bands[0]-1]
green = reflArray[:,:,bands[1]-1]
blue = reflArray[:,:,bands[2]-1]
stackedRGB = np.stack((red,green,blue),axis=2)
return stackedRGB
rgb_bands = (58,34,19)
print('Band 58 Center Wavelength = %.2f' %(sercMetadata['wavelength'][57]),'nm')
print('Band 34 Center Wavelength = %.2f' %(sercMetadata['wavelength'][33]),'nm')
print('Band 19 Center Wavelength = %.2f' %(sercMetadata['wavelength'][18]),'nm')
SERCrgb = stack_rgb(sercRefl,rgb_bands)
SERCrgb.shape
plot_aop_refl(SERCrgb,
sercMetadata['spatial extent'],
title='SERC RGB Image',
cbar='off')
from skimage import exposure
def plot_aop_rgb(rgbArray,ext,ls_pct=5,plot_title=''):
from skimage import exposure
pLow, pHigh = np.percentile(rgbArray[~np.isnan(rgbArray)], (ls_pct,100-ls_pct))
img_rescale = exposure.rescale_intensity(rgbArray, in_range=(pLow,pHigh))
plt.imshow(img_rescale,extent=ext)
plt.title(plot_title + '\n Linear ' + str(ls_pct) + '% Contrast Stretch');
ax = plt.gca(); ax.ticklabel_format(useOffset=False, style='plain') #do not use scientific notation #
rotatexlabels = plt.setp(ax.get_xticklabels(),rotation=90) #rotate x tick labels 90 degree
plot_aop_rgb(SERCrgb,
sercMetadata['spatial extent'],
plot_title = 'SERC RGB')
CIRbands = (90,34,19)
print('Band 90 Center Wavelength = %.2f' %(sercMetadata['wavelength'][89]),'nm')
print('Band 34 Center Wavelength = %.2f' %(sercMetadata['wavelength'][33]),'nm')
print('Band 19 Center Wavelength = %.2f' %(sercMetadata['wavelength'][18]),'nm')
SERCcir = stack_rgb(sercRefl,CIRbands)
plot_aop_rgb(SERCcir,
sercMetadata['spatial extent'],
ls_pct=2,
plot_title='SERC CIR')
from IPython.html.widgets import *
array = copy.copy(sercRefl)
metadata = copy.copy(sercMetadata)
def RGBplot_widget(R,G,B):
#Pre-allocate array size
rgbArray = np.zeros((array.shape[0],array.shape[1],3), 'uint8')
Rband = array[:,:,R-1].astype(np.float)
#Rband_clean = clean_band(Rband,Refl_md)
Gband = array[:,:,G-1].astype(np.float)
#Gband_clean = clean_band(Gband,Refl_md)
Bband = array[:,:,B-1].astype(np.float)
#Bband_clean = clean_band(Bband,Refl_md)
rgbArray[..., 0] = Rband*256
rgbArray[..., 1] = Gband*256
rgbArray[..., 2] = Bband*256
# Apply Adaptive Histogram Equalization to Improve Contrast:
img_nonan = np.ma.masked_invalid(rgbArray) #first mask the image
img_adapteq = exposure.equalize_adapthist(img_nonan, clip_limit=0.10)
plot = plt.imshow(img_adapteq,extent=metadata['spatial extent']);
plt.title('Bands: \nR:' + str(R) + ' (' + str(round(metadata['wavelength'][R-1])) +'nm)'
+ '\n G:' + str(G) + ' (' + str(round(metadata['wavelength'][G-1])) + 'nm)'
+ '\n B:' + str(B) + ' (' + str(round(metadata['wavelength'][B-1])) + 'nm)');
ax = plt.gca(); ax.ticklabel_format(useOffset=False, style='plain')
rotatexlabels = plt.setp(ax.get_xticklabels(),rotation=90)
interact(RGBplot_widget, R=(1,426,1), G=(1,426,1), B=(1,426,1))
rgbArray = copy.copy(SERCrgb)
def linearStretch(percent):
pLow, pHigh = np.percentile(rgbArray[~np.isnan(rgbArray)], (percent,100-percent))
img_rescale = exposure.rescale_intensity(rgbArray, in_range=(pLow,pHigh))
plt.imshow(img_rescale,extent=sercMetadata['spatial extent'])
plt.title('SERC RGB \n Linear ' + str(percent) + '% Contrast Stretch');
ax = plt.gca()
ax.ticklabel_format(useOffset=False, style='plain')
rotatexlabels = plt.setp(ax.get_xticklabels(),rotation=90)
interact(linearStretch,percent=(0,20,1))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unicode strings
Step2: The tf.string data type
Step3: A tf.string tensor treats byte strings as atomic units. This enables it to store byte strings of varying lengths. The string length is not included in the tensor dimensions.
Step4: If you use Python to construct strings, note that string literals are Unicode-encoded by default.
Step5: Converting between representations
Step6: Batch dimensions
Step7: You can use this tf.RaggedTensor directly, or convert it to a dense tf.Tensor with padding or a tf.SparseTensor using the methods tf.RaggedTensor.to_tensor and tf.RaggedTensor.to_sparse.
Step8: When encoding multiple strings with the same lengths, use a tf.Tensor as the input.
Step9: When encoding multiple strings with varying length, use a tf.RaggedTensor as the input.
Step10: If you have a tensor with multiple strings in padded or sparse format, convert it first into a tf.RaggedTensor before calling tf.strings.unicode_encode.
Step11: Unicode operations
Step12: Character substrings
Step13: Split Unicode strings
Step14: Byte offsets for characters
Step15: Unicode scripts
Step16: The tf.strings.unicode_script operation can also be applied to multidimensional tf.Tensors or tf.RaggedTensors of codepoints
Step17: Example
Step18: First, decode the sentences into character codepoints, and find the script identifier for each character.
Step19: Use the script identifiers to determine where word boundaries should be added. Add a word boundary at the beginning of each sentence, and for each character whose script differs from the previous character.
Step20: You can then use those start offsets to build a RaggedTensor containing the list of words from all batches.
Step21: To finish, segment the word codepoints RaggedTensor back into sentences and encode into UTF-8 strings for readability.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
tf.constant(u"Thanks 😊")
tf.constant([u"You're", u"welcome!"]).shape
# Unicode string, represented as a UTF-8 encoded string scalar.
text_utf8 = tf.constant(u"语言处理")
text_utf8
# Unicode string, represented as a UTF-16-BE encoded string scalar.
text_utf16be = tf.constant(u"语言处理".encode("UTF-16-BE"))
text_utf16be
# Unicode string, represented as a vector of Unicode code points.
text_chars = tf.constant([ord(char) for char in u"语言处理"])
text_chars
tf.strings.unicode_decode(text_utf8,
input_encoding='UTF-8')
tf.strings.unicode_encode(text_chars,
output_encoding='UTF-8')
tf.strings.unicode_transcode(text_utf8,
input_encoding='UTF8',
output_encoding='UTF-16-BE')
# A batch of Unicode strings, each represented as a UTF8-encoded string.
batch_utf8 = [s.encode('UTF-8') for s in
[u'hÃllo', u'What is the weather tomorrow', u'Göödnight', u'😊']]
batch_chars_ragged = tf.strings.unicode_decode(batch_utf8,
input_encoding='UTF-8')
for sentence_chars in batch_chars_ragged.to_list():
print(sentence_chars)
batch_chars_padded = batch_chars_ragged.to_tensor(default_value=-1)
print(batch_chars_padded.numpy())
batch_chars_sparse = batch_chars_ragged.to_sparse()
nrows, ncols = batch_chars_sparse.dense_shape.numpy()
elements = [['_' for i in range(ncols)] for j in range(nrows)]
for (row, col), value in zip(batch_chars_sparse.indices.numpy(), batch_chars_sparse.values.numpy()):
elements[row][col] = str(value)
# max_width = max(len(value) for row in elements for value in row)
value_lengths = []
for row in elements:
for value in row:
value_lengths.append(len(value))
max_width = max(value_lengths)
print('[%s]' % '\n '.join(
'[%s]' % ', '.join(value.rjust(max_width) for value in row)
for row in elements))
tf.strings.unicode_encode([[99, 97, 116], [100, 111, 103], [99, 111, 119]],
output_encoding='UTF-8')
tf.strings.unicode_encode(batch_chars_ragged, output_encoding='UTF-8')
tf.strings.unicode_encode(
tf.RaggedTensor.from_sparse(batch_chars_sparse),
output_encoding='UTF-8')
tf.strings.unicode_encode(
tf.RaggedTensor.from_tensor(batch_chars_padded, padding=-1),
output_encoding='UTF-8')
# Note that the final character takes up 4 bytes in UTF8.
thanks = u'Thanks 😊'.encode('UTF-8')
num_bytes = tf.strings.length(thanks).numpy()
num_chars = tf.strings.length(thanks, unit='UTF8_CHAR').numpy()
print('{} bytes; {} UTF-8 characters'.format(num_bytes, num_chars))
# Here, unit='BYTE' (default). Returns a single byte with len=1
tf.strings.substr(thanks, pos=7, len=1).numpy()
# Specifying unit='UTF8_CHAR', returns a single 4 byte character in this case
print(tf.strings.substr(thanks, pos=7, len=1, unit='UTF8_CHAR').numpy())
tf.strings.unicode_split(thanks, 'UTF-8').numpy()
codepoints, offsets = tf.strings.unicode_decode_with_offsets(u'🎈🎉🎊', 'UTF-8')
for (codepoint, offset) in zip(codepoints.numpy(), offsets.numpy()):
print('At byte offset {}: codepoint {}'.format(offset, codepoint))
uscript = tf.strings.unicode_script([33464, 1041]) # ['芸', 'Б']
print(uscript.numpy()) # [17, 8] == [USCRIPT_HAN, USCRIPT_CYRILLIC]
print(tf.strings.unicode_script(batch_chars_ragged))
# dtype: string; shape: [num_sentences]
#
# The sentences to process. Edit this line to try out different inputs!
sentence_texts = [u'Hello, world.', u'世界こんにちは']
# dtype: int32; shape: [num_sentences, (num_chars_per_sentence)]
#
# sentence_char_codepoint[i, j] is the codepoint for the j'th character in
# the i'th sentence.
sentence_char_codepoint = tf.strings.unicode_decode(sentence_texts, 'UTF-8')
print(sentence_char_codepoint)
# dtype: int32; shape: [num_sentences, (num_chars_per_sentence)]
#
# sentence_char_scripts[i, j] is the Unicode script of the j'th character in
# the i'th sentence.
sentence_char_script = tf.strings.unicode_script(sentence_char_codepoint)
print(sentence_char_script)
# dtype: bool; shape: [num_sentences, (num_chars_per_sentence)]
#
# sentence_char_starts_word[i, j] is True if the j'th character in the i'th
# sentence is the start of a word.
sentence_char_starts_word = tf.concat(
[tf.fill([sentence_char_script.nrows(), 1], True),
tf.not_equal(sentence_char_script[:, 1:], sentence_char_script[:, :-1])],
axis=1)
# dtype: int64; shape: [num_words]
#
# word_starts[i] is the index of the character that starts the i'th word (in
# the flattened list of characters from all sentences).
word_starts = tf.squeeze(tf.where(sentence_char_starts_word.values), axis=1)
print(word_starts)
# dtype: int32; shape: [num_words, (num_chars_per_word)]
#
# word_char_codepoint[i, j] is the codepoint for the j'th character in the
# i'th word.
word_char_codepoint = tf.RaggedTensor.from_row_starts(
values=sentence_char_codepoint.values,
row_starts=word_starts)
print(word_char_codepoint)
# dtype: int64; shape: [num_sentences]
#
# sentence_num_words[i] is the number of words in the i'th sentence.
sentence_num_words = tf.reduce_sum(
tf.cast(sentence_char_starts_word, tf.int64),
axis=1)
# dtype: int32; shape: [num_sentences, (num_words_per_sentence), (num_chars_per_word)]
#
# sentence_word_char_codepoint[i, j, k] is the codepoint for the k'th character
# in the j'th word in the i'th sentence.
sentence_word_char_codepoint = tf.RaggedTensor.from_row_lengths(
values=word_char_codepoint,
row_lengths=sentence_num_words)
print(sentence_word_char_codepoint)
tf.strings.unicode_encode(sentence_word_char_codepoint, 'UTF-8').to_list()
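# Added check: joining the per-sentence word strings should reproduce the
# original sentences, since whitespace runs are kept as their own "words".
print(tf.strings.reduce_join(
    tf.strings.unicode_encode(sentence_word_char_codepoint, 'UTF-8'),
    axis=-1).numpy())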
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Helpfully, transformers supports tracing their model with the PyTorch JIT. We use their tutorial on it, the following is copied straight from the tutorial
Step2: Now we can trace our model. As we want to do inference, we impose evaluation mode and not requiring gradients for the parameters.
Step3: Readers of the PyTorch Bert & TVM tutorial will recall the wrapper we had for getting inputs and outputs of a submodule of the model.
Step4: We also had a fancy visualization. We now have a small addition, the dictionary to specify attributes for nodes. This will come in handy later.
Step5: Let's wrap the first BertLayer in our model. You could also take smaller bits if you run my tutorials on your phone and want smaller graphs.
Step6: We trace the module.
Step7: Let's convert the traced model to TVM. This works just as before.
Step8: One thing we'll do in between is to move from a module interface - with named parameters - to a functional interface with explicitly passed parameters, which is what TVM works with.
Step9: Let's look at our function.
Step10: As in the BERT inference, we want to run some optimization passes.
Step11: As hinted at above, TVM's gradient taking assumes that it is the last element in the computation (the ones-Tensors discussed above). This isn't a good fit with PyTorch's modular view which expects a grad_out for each output to be given. Happily, this is computationally equivalent to multiplying by grad out and summation, so we amend our function with that. We wish to be flexible, so we allow both functions returning a single tensor and those returning a tuple of tensors.
Step12: Finally we can take the grad. As we get a lot of let nodes, we bring it to normal form.
Step13: TVM's gradient-taking returns a function that has the same parameters as the original function (in our case amended with the grad_out and dropout) and then returns a tuple of the original return and a tuple containing gradients for all inputs.
Step14: Now is a good time to take a look at our graph
Step15: But in PyTorch, we first compute the forward and then the backward, so we have to take out the saw and split the graph into a forward and a backward part.
Step16: Now we can split the function as described above. We collect the blue nodes to capture - but constants will just be duplicated and inputs (Var nodes) need to be treated separately.
Step17: Now we can split out the backward, replacing all the blue nodes with variables.
Step18: Next we take the forward and amend it to also return the required intermediates.
Step19: TVM cannot return nested tuples, so we flatten the output in the function. Again we differentiate between tensor-valued functions and tuple valued ones (i.e. those returning potentially multiple tensors).
Step20: And at last, we can let TVM do its magic and compile our functions.
Step21: Time to give it a spin. We define convenience functions to move tensors between PyTorch and TVM and get the model parameters as a TVM dictionary.
Step22: Similarly, we get the inputs on the GPU in PyTorch and TVM.
Step23: We need to deal with the dropout. It will turn out that our record of the dropout random draws happens in the same order as the dropout in the model. We did a depth-first search on the computational graph to find them, and if the values of the dropout are connected in the graph rather than being on independent branches, this will be the order in which PyTorch draws the matrices, too. If not, good luck fiddling with the order.
Step24: Now we can run the forward.
Step25: And we can compare the output to PyTorch's
Step26: Supergood. Let's also try the backward. We generate a grad_out, set all the variables, and run the backward model.
Step27: On the PyTorch side, it is easiest to re-run the forward (remembering to reset the random seed) and get the grads.
Step28: Did it work? It seems so
Step29: But we wanted to get something running in PyTorch, right?
Step30: Because calling TVMFunction.apply does not please the eye, we define a convenience function and because we always love to have proper signatures, we also give it the names of our inputs.
Step31: Let's check everything still works.
Step32: Yay!
Step33: Let's give it a spin and see that it hasn't stopped working.
Step34: Even better
Step35: All done!
Step36: Performance
Step37: We build with our log.
|
<ASSISTANT_TASK:>
Python Code:
import inspect
import types
import sys
# I sometimes need to choose PyTorch...
#sys.path.insert(0, '/home/tv/pytorch/pytorch/build/lib.linux-x86_64-3.8//')
import torch
import torch.utils.dlpack
# import TVM
import sys
import os
tvm_root = '/home/tv/rocm/tvm/tvm/'
tvm_paths = [os.path.join(tvm_root, p) for p in ['python', 'topi/python', 'nnvm/python']]
os.environ['PYTHONPATH'] = ':'.join([os.environ.get('PYTHONPATH', '')] + tvm_paths)
for p in tvm_paths:
sys.path.insert(0, p)
import tvm
import tvm.relay
torch.cuda.get_device_name()
import transformers
from transformers import BertModel, BertTokenizer, BertConfig
import numpy
import torch
enc = BertTokenizer.from_pretrained("bert-base-uncased")
# Tokenizing input text
text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
tokenized_text = enc.tokenize(text)
# Masking one of the input tokens
masked_index = 8
tokenized_text[masked_index] = '[MASK]'
indexed_tokens = enc.convert_tokens_to_ids(tokenized_text)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
# Creating a dummy input
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
dummy_input = [tokens_tensor, segments_tensors]
# If you are instantiating the model with `from_pretrained` you can also easily set the TorchScript flag
model = BertModel.from_pretrained("bert-base-uncased", torchscript=True)
model.eval()
for p in model.parameters():
p.requires_grad_(False)
transformers.__version__
dtype = torch.float32
dtype_str = str(dtype).split('.')[-1]
# Creating the trace
model.to(dtype)
traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors])
traced_model.eval()
for p in traced_model.parameters():
p.requires_grad_(False)
class DebugWrap(torch.nn.Module):
def __init__(self, root, target_qn):
super().__init__()
self.root = (root,) # Hide from PyTorch
parent, = self.root
target_qn = target_qn.split('.')
self.target_basename = target_qn[-1]
for nc in target_qn[:-1]:
parent = getattr(parent, nc)
self.parent = (parent,)
target = getattr(parent, self.target_basename)
self.wrapped = target
setattr(parent, self.target_basename, self)
def remove(self):
parent, = self.parent
setattr(parent, self.target_basename, self.wrapped)
self.root = None
def forward(self, *inp, **kwinp):
assert self.root is not None
self.DEBUG_INP = inp
self.DEBUG_KWINP = kwinp
out = self.wrapped(*inp, **kwinp)
self.DEBUG_OUT = out
return out
import graphviz
def visualize(expr, collapse_small=True, node_attr_dict = {}):
def collect_ops(node):
ops = set()
def visitor(e):
if isinstance(e, tvm.ir.Op):
ops.add(e.name)
tvm.relay.analysis.post_order_visit(node, visitor)
return ops
# node_dict maps a Relay node to an index (node ID)
def _traverse_expr(node, node_dict):
if node in node_dict:
return
node_dict[node] = len(node_dict)
node_dict = {}
tvm.relay.analysis.post_order_visit(expr, lambda x: _traverse_expr(x, node_dict))
relayviz_nodes = []
dot = graphviz.Digraph(format='svg')
dot.attr('node', shape = 'box')
def to_str(node):
if isinstance(node, tvm.relay.Constant):
return repr(node).lstrip('Constant(')[:-1]
else:
raise NotImplementedError("to_str:" + repr(node))
def is_small_const(c):
if not (collapse_small and isinstance(c, tvm.relay.Constant)):
return False
if isinstance(c.data, tvm.runtime.ndarray.NDArray):
return numpy.prod(c.data.shape) < 10
return True
# Sort by node ID
for node, node_id in sorted(node_dict.items(), key=lambda x: x[1]):
if isinstance(node, tvm.relay.Function):
dot.node(str(node_id), 'Function', **node_attr_dict.get(node, {}))
dot.edge(str(node_dict[node.body]), str(node_id))
elif isinstance(node, tvm.relay.Var):
if node.type_annotation is not None:
if hasattr(node.type_annotation, 'shape'):
shape = tuple([int(x) for x in node.type_annotation.shape])
dtype = node.type_annotation.dtype
typstr = 'Tensor[{}, {}]'.format(shape, dtype)
else:
typstr = str(node.type_annotation)
else:
typstr = '?'
d = dict(shape = 'ellipse')
d.update(node_attr_dict.get(node, {}))
dot.node(str(node_id),
'{}: {}'.format(
node.name_hint, typstr
), **d)
elif isinstance(node, tvm.relay.Tuple):
dot.node(str(node_id), 'Tuple[...])', **node_attr_dict.get(node, {}))
for field in node.fields:
dot.edge(str(node_dict[field]), str(node_id))
elif isinstance(node, tvm.relay.Constant):
if not is_small_const(node): # small consts are shown in ops
dot.node(str(node_id), 'Constant({}, {})'.format(node.data.shape, node.data.dtype),
**node_attr_dict.get(node, {}))
elif isinstance(node, tvm.relay.Call):
args_with_edge = []
arg_str_list = []
for arg in node.args:
if is_small_const(arg):
arg_str_list.append(to_str(arg))
else:
arg_str_list.append('·')
args_with_edge.append(arg)
arg_str = ', '.join(arg_str_list)
if isinstance(node.op, tvm.ir.Op):
name = node.op.name
attrs = {k:getattr(node.attrs, k) for k in node.attrs.keys()} if hasattr(node.attrs, 'keys') else {}
#attrs = inspect.getmembers(node.attrs)
attr_str_list = [k+'='+(str(v) if len(str(v))<15 else "...") for k, v in attrs.items()]
if attr_str_list:
attr_str = '| '+ ', '.join(attr_str_list)
else:
attr_str = ''
else:
ops = collect_ops(node)
if ops:
name = '_'.join(ops)
else:
name = '...'
attr_str = ''
s = f'{name}({arg_str}{attr_str})'
dot.node(str(node_id), s, **node_attr_dict.get(node, {}))
for arg in args_with_edge:
dot.edge(str(node_dict[arg]), str(node_id))
elif isinstance(node, tvm.ir.Op):
# dot.node(str(node_id), 'Op {}'.format(node.name))
pass # covered in call
elif isinstance(node, tvm.relay.TupleGetItem):
dot.node(str(node_id), 'TupleGetItem(idx={})'.format(node.index), **node_attr_dict.get(node, {}))
dot.edge(str(node_dict[node.tuple_value]), str(node_id))
elif isinstance(node, tvm.relay.Let):
dot.node(str(node_id), 'Let(XX)', **node_attr_dict.get(node, {}))
dot.edge(str(node_dict[node.value]), str(node_id))
dot.edge(str(node_id), str(node_dict[node.var]))
else:
raise RuntimeError(
'Unknown node type. node_id: {}, node: {}'.format(node_id, type(node)))
return dot
try:
debug_wrap = DebugWrap(model, "encoder.layer.0") # encoder.layer.0.attention.self
tt = tokens_tensor.cpu()
st = segments_tensors.cpu()
model(tt, st)
finally:
debug_wrap.remove()
model.train()
traced_module = torch.jit.trace(debug_wrap.wrapped, [i.to(dtype) for i in debug_wrap.DEBUG_INP[:2]])
shape_list = [(i.debugName().split('.')[0], i.type().sizes()) for i in list(traced_module.graph.inputs())[1:]]
mod, mod_params = tvm.relay.frontend.from_pytorch(traced_module, shape_list, default_dtype=dtype_str)
# the converter will output arguments in an arbitrary order (well, by position of use), we want that of the input
fn = mod['main']
# Careful traced module's vs. non-traced module's parameter ordering.
# Anecdotally, I have not seen orderings differ between the two, though.
arg_order = ([n for n, _ in shape_list]
+[n for n, _ in traced_module.named_parameters()])
tmp_arg_idx = {p.name_hint: i for i, p in enumerate(fn.params)}
fn = tvm.relay.Function([fn.params[tmp_arg_idx[n]] for n in arg_order], fn.body)
visualize(fn)
import numpy
def work_on_fn(pass_cls):
def apply_pass(fn_or_mod):
if isinstance(fn_or_mod, tvm.IRModule):
return pass_cls()(fn_or_mod)
if isinstance(fn_or_mod, tvm.relay.Function):
return pass_cls()(
tvm.IRModule({'main': fn_or_mod}))['main']
        raise NotImplementedError("unsupported type {}".format(type(fn_or_mod)))
return apply_pass
infer_type = work_on_fn(tvm.relay.transform.InferType)
to_graph_normal_form = work_on_fn(tvm.relay.transform.ToGraphNormalForm)
dead_code_elimination = work_on_fn(tvm.relay.transform.DeadCodeElimination)
eliminate_common_subexpr = work_on_fn(tvm.relay.transform.EliminateCommonSubexpr)
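# Minimal usage sketch: these helpers let us run module-level passes on a
# bare function, e.g.
#   fn = infer_type(fn)               # annotate every node with checked_type
#   fn = dead_code_elimination(fn)    # drop unused subexpressions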
class ShapeConstDedupMutator(tvm.relay.ExprMutator):
def __init__(self):
super().__init__()
self.shape_consts = {}
def visit_call(self, call):
if (isinstance(call.op, tvm.ir.Op)
and call.op.name in {"reshape", "broadcast_to", "collapse_sum_to"}
and isinstance(call.args[1], tvm.relay.Constant)):
# assert list(call.attrs.newshape) == list(call.args[1].data.asnumpy())
new_fn = self.visit(call.op)
new_args = [self.visit(arg) for arg in call.args]
const = new_args[1]
assert const.data.dtype.startswith('int') and len(const.data.shape)==1
key = tuple(const.data.asnumpy())
if key in self.shape_consts:
new_args[1] = self.shape_consts[key]
else:
self.shape_consts[key] = new_args[1]
return tvm.relay.Call(new_fn, new_args, call.attrs)
return super().visit_call(call)
class TransposeDedupMutator(tvm.relay.ExprMutator):
def visit_call(self, call):
if (isinstance(call.op, tvm.ir.Op) and call.op.name == "transpose"
and isinstance(call.args[0], tvm.relay.Call)
and isinstance(call.args[0].op, tvm.ir.Op) and call.args[0].op.name == "transpose"):
axes = [call.args[0].attrs.axes[int(i)] for i in call.attrs.axes]
new_inp = self.visit(call.args[0].args[0])
if axes == list(range(len(axes))): # neutral permutation, should really do this separately...
return new_inp
return tvm.relay.transpose(new_inp, axes)
return super().visit_call(call)
#@tvm.relay.transform.function_pass(opt_level=1)
#def TransposeDedup(fn, mod, ctx):
# return TransposeDedupMutator().visit(fn)
class ZeroZapp(tvm.relay.dataflow_pattern.DFPatternCallback):
def __init__(self):
self.zeros = tvm.relay.dataflow_pattern.is_op("zeros")(tvm.relay.dataflow_pattern.wildcard())
self.other_tensor = tvm.relay.dataflow_pattern.wildcard()
self.pattern = (self.zeros + self.other_tensor) | (self.other_tensor + self.zeros)
def callback(self, pre, post, node_map):
rt = node_map[self.pattern][0]
ot = node_map[self.other_tensor][0]
if (ot._checked_type_ == rt._checked_type_):
return ot
else:
return tvm.relay.broadcast_to(ot, list(rt._checked_type_.shape))
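# Note: the second ZeroZapp below deliberately replaces the definition above;
# it additionally matches constant tensors and checks that they are actually zero.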
class ZeroZapp(tvm.relay.dataflow_pattern.DFPatternCallback):
def __init__(self):
self.ones = tvm.relay.dataflow_pattern.is_op("zeros")(tvm.relay.dataflow_pattern.wildcard()) | tvm.relay.dataflow_pattern.is_constant()
self.other_tensor = tvm.relay.dataflow_pattern.wildcard()
self.pattern = (self.ones + self.other_tensor) | (self.other_tensor + self.ones)
def callback(self, pre, post, node_map):
rt = node_map[self.pattern][0]
ones = node_map[self.ones][0]
ot = node_map[self.other_tensor][0]
if isinstance(ones, tvm.relay.Constant):
val = ones.data.asnumpy()
if not ((val == 0) if numpy.isscalar(val) else (val == 0).all()):
return rt
# I don't know why I don't reliably get checked types here...
if (((rt._checked_type_ is not None) and (ot._checked_type_ == rt._checked_type_))
or (rt.type_args[0] == rt.type_args[1])):
return ot
elif (rt._checked_type_ is not None):
return tvm.relay.broadcast_to(ot, list(rt._checked_type_.shape))
return rt
class OneZapp(tvm.relay.dataflow_pattern.DFPatternCallback):
def __init__(self):
self.ones = tvm.relay.dataflow_pattern.is_op("ones")(tvm.relay.dataflow_pattern.wildcard()) | tvm.relay.dataflow_pattern.is_constant()
self.other_tensor = tvm.relay.dataflow_pattern.wildcard()
self.pattern = (self.ones * self.other_tensor) | (self.other_tensor * self.ones)
def callback(self, pre, post, node_map):
global val
rt = node_map[self.pattern][0]
ones = node_map[self.ones][0]
ot = node_map[self.other_tensor][0]
if isinstance(ones, tvm.relay.Constant):
val = ones.data.asnumpy()
if not ((val == 1) if numpy.isscalar(val) else (val == 1).all()):
return rt
if (((rt._checked_type_ is not None) and (ot._checked_type_ == rt._checked_type_))
or (rt.type_args[0] == rt.type_args[1])):
return ot
if (rt._checked_type_ is not None):
return tvm.relay.broadcast_to(ot, list(rt._checked_type_.shape))
return rt
class LikeZapp(tvm.relay.dataflow_pattern.DFPatternCallback):
def __init__(self):
self.translations_with_dt = {'zeros_like': tvm.relay.zeros,
'ones_like': tvm.relay.ones}
self.data_tensor = tvm.relay.dataflow_pattern.wildcard()
self.pattern_tensor = tvm.relay.dataflow_pattern.wildcard()
self.pattern = ((tvm.relay.dataflow_pattern.is_op("zeros_like")
| tvm.relay.dataflow_pattern.is_op("ones_like")
)(self.data_tensor)
) | ((
tvm.relay.dataflow_pattern.is_op("collapse_sum_like")
| tvm.relay.dataflow_pattern.is_op("reshape_like")
| tvm.relay.dataflow_pattern.is_op("broadcast_to_like")
)(self.data_tensor, self.pattern_tensor))
def callback(self, pre, post, node_map):
data = node_map[self.data_tensor][0]
res = node_map[self.pattern][0]
if res.op.name in self.translations_with_dt:
ret = self.translations_with_dt[res.op.name](list(res.type_args[0].shape),
res.type_args[0].dtype) # which dtype?
return ret
if (res.type_args[0] is not None and res.type_args[0] == res.type_args[1]):
return data
if res.op.name == 'broadcast_to_like':
return tvm.relay.broadcast_to(data, list(res.type_args[1].shape))
if res.op.name == 'reshape_like':
return tvm.relay.reshape(data, list(res.type_args[1].shape))
if res.op.name == 'collapse_sum_like':
return tvm.relay.collapse_sum_to(data, list(res.type_args[1].shape))
return res
class DecomposeLayerNorm(tvm.relay.dataflow_pattern.DFPatternCallback):
# TVM doesn't have a LayerNorm backward
def __init__(self):
self.pattern = tvm.relay.dataflow_pattern.is_op("nn.layer_norm")(
tvm.relay.dataflow_pattern.wildcard(),
tvm.relay.dataflow_pattern.wildcard(),
tvm.relay.dataflow_pattern.wildcard())
def callback(self, pre, post, node_map):
# probably only 1d...
res = node_map[self.pattern][0]
inp, weight, bias = res.args
mean = tvm.relay.mean(inp, axis=res.attrs.axis, keepdims=True)
std = tvm.relay.std(inp, axis=res.attrs.axis, keepdims=True)
res_new = ((inp - mean) / (std + tvm.relay.const(res.attrs.epsilon, dtype=res.type_args[0].dtype))) * weight + bias
return res_new
class ExternalizeDropout(tvm.relay.dataflow_pattern.DFPatternCallback):
# TVM doesn't have a Dropout defined (for inference it can be deleted)
# but it also does not appear to have random, so we make the random draw
# an input
def __init__(self):
self.dropout_info = {}
self.counter = 0
self.inp = tvm.relay.dataflow_pattern.wildcard()
self.dropout = tvm.relay.dataflow_pattern.is_op("nn.dropout")(self.inp)
self.pattern = tvm.relay.dataflow_pattern.is_tuple_get_item(self.dropout, 0)
def callback(self, pre, post, node_map):
res = node_map[self.pattern][0]
dropout = node_map[self.dropout][0]
inp = node_map[self.inp][0]
typ = dropout.type_args[0]
rate = dropout.attrs.rate
name = f"dropout:{self.counter}"
self.counter += 1
do_var = tvm.relay.var(name, type_annotation=typ)
self.dropout_info[name] = (rate, typ)
return inp * (do_var * tvm.relay.const(1 / (1 - rate), dtype=typ.dtype))
def externalize_dropout(fn):
edo = ExternalizeDropout()
fn = tvm.relay.dataflow_pattern.rewrite(edo, fn)
return fn, edo.dropout_info
fn = TransposeDedupMutator().visit(fn)
fn = infer_type(fn)
output_type = fn.body.checked_type
if isinstance(output_type, tvm.relay.TensorType):
gr_out = tvm.relay.var("gr:out", output_type)
fn_for_gr = tvm.relay.Function(list(fn.params) + [gr_out], tvm.relay.sum(fn.body * gr_out))
else:
# we can try to handle tuples of tensors, but our nesting patience ends there
assert (isinstance(output_type, tvm.relay.TupleType) and
all([isinstance(f, tvm.relay.TensorType) for f in output_type.fields]))
gr_outs = [tvm.relay.var(f"gr:out:{i}", t) for i, t in enumerate(output_type.fields)]
prods_with_gr_out = [tvm.relay.sum(tvm.relay.TupleGetItem(fn.body, i) * go_i)
for i, go_i in enumerate(gr_outs)]
s = prods_with_gr_out[0]
for p in prods_with_gr_out[1:]:
s = s + p
fn_for_gr = tvm.relay.Function(list(fn.params) + gr_outs, s)
fn_for_gr = infer_type(fn_for_gr)
fn_for_gr = tvm.relay.dataflow_pattern.rewrite(DecomposeLayerNorm(), fn_for_gr)
fn_for_gr = infer_type(fn_for_gr)
fn_for_gr, dropout_info = externalize_dropout(fn_for_gr)
fn_for_gr = infer_type(fn_for_gr)
visualize(fn_for_gr)
grfn = tvm.relay.transform.gradient(fn_for_gr, mode='first_order')
grfn = to_graph_normal_form(grfn)
# Now we have (sum(orig_out * grad_out), (grad_inp_1, ..., grad_inp_n, grad_grad_out, gr_dropout ...))
# but we only want orig_out and grad_inp_1, ..., grad_inp_n
def is_aux_input(p):
return p.name_hint.startswith('dropout:') or p.name_hint.startswith('gr:out:')
# the gr_out and dropout parameters will have gradients computed, but we do not want that
grads_to_keep = tvm.relay.Tuple([g for p, g in zip(grfn.params, grfn.body.fields[1].fields)
if not is_aux_input(p)])
assert grfn.body.fields[0].op.name == 'sum'
assert grfn.body.fields[0].args[0].op.name == 'multiply'
if isinstance(output_type, tvm.relay.TensorType):
orig_out = grfn.body.fields[0].args[0].args[0]
else:
assert isinstance(output_type, tvm.relay.TupleType)
orig_out = grfn.body.fields[0].args[0].args[0].tuple_value
out_and_grad = tvm.relay.Tuple([orig_out, grads_to_keep])
out_and_grad_fn = tvm.relay.Function(grfn.params, out_and_grad)
out_and_grad_fn = infer_type(out_and_grad_fn)
out_and_grad_fn = dead_code_elimination(out_and_grad_fn)
out_and_grad_fn = eliminate_common_subexpr(out_and_grad_fn)
out_and_grad_fn = infer_type(out_and_grad_fn)
out_and_grad_fn = tvm.relay.dataflow_pattern.rewrite(LikeZapp(), out_and_grad_fn)
out_and_grad_fn = infer_type(out_and_grad_fn)
out_and_grad_fn = tvm.relay.dataflow_pattern.rewrite(ZeroZapp(), out_and_grad_fn)
out_and_grad_fn = infer_type(out_and_grad_fn)
out_and_grad_fn = tvm.relay.dataflow_pattern.rewrite(OneZapp(), out_and_grad_fn)
out_and_grad_fn = infer_type(out_and_grad_fn)
out_and_grad_fn = tvm.relay.dataflow_pattern.rewrite(OneZapp(), out_and_grad_fn)
out_and_grad_fn = infer_type(out_and_grad_fn)
out_and_grad_fn = dead_code_elimination(out_and_grad_fn)
out_and_grad_fn = eliminate_common_subexpr(out_and_grad_fn)
visualize(out_and_grad_fn)
orig_out = out_and_grad_fn.body.fields[0]
grad_ins = out_and_grad_fn.body.fields[1]
color_dict = {}
def color(n, c):
if n in color_dict:
return
color_dict[n] = c
for a in getattr(n, 'args', []):
color(a, c)
for a in getattr(n, 'fields', []):
color(a, c)
for nam in ('body', 'tuple_value'):
b = getattr(n, nam, None)
if b is not None:
color(b, c)
color(orig_out, {'color': 'red'})
seen = set()
def color_crossings(n, c):
if n in seen:
return
seen.add(n)
if n in color_dict:
color_dict[n] = c
return
for a in getattr(n, 'args', []):
color_crossings(a, c)
for a in getattr(n, 'fields', []):
color_crossings(a, c)
for nam in ('body', 'tuple_value'):
b = getattr(n, nam, None)
if b is not None:
color_crossings(b, c)
color_crossings(grad_ins, {'color': 'blue'})
visualize(out_and_grad_fn, node_attr_dict=color_dict)
nodes_to_capture = [n for n, v in color_dict.items()
if v['color'] == 'blue' and not isinstance(n, (tvm.relay.Constant, tvm.relay.Var))]
capture_tup = tvm.relay.Tuple(nodes_to_capture)
nodes_to_capture_idx = {n:i for i, n in enumerate(nodes_to_capture)}
capture_vars = [tvm.relay.var(f"input:captures:{i}", type_annotation=nodes_to_capture[i].checked_type)
for i, n in enumerate(nodes_to_capture)]
grads_in = out_and_grad_fn.body.fields[1]
needed_vars = set()
class GradientOnlyMutator(tvm.relay.ExprMutator):
def __init__(self):
super().__init__()
def visit_var(self, var):
needed_vars.add(var)
return var
def visit(self, expr):
if expr in nodes_to_capture_idx:
return capture_vars[nodes_to_capture_idx[expr]]
return super().visit(expr)
grads_in_only = GradientOnlyMutator().visit(grads_in)
gr_only_fn = tvm.relay.Function(sorted(needed_vars) + capture_vars, grads_in_only)
# TODO: check against output of original
fn_for_gr_input_names = {p.name_hint for p in fn_for_gr.params}
needed_var_names = {v.name_hint for v in needed_vars}
assert needed_var_names <= fn_for_gr_input_names
inputs_to_keep = [n for n in needed_vars if not is_aux_input(n)]
capture_tup = tvm.relay.Tuple([n for n in nodes_to_capture])
fw_and_cap_params = [p for p in out_and_grad_fn.params if not p.name_hint.startswith('gr:out:')]
fw_and_cap_fn = tvm.relay.Function(fw_and_cap_params,
tvm.relay.Tuple((out_and_grad_fn.body.fields[0],) + (capture_tup,)))
visualize(fw_and_cap_fn)
if isinstance(fn.body, tvm.relay.Tuple):
# tuple of tensors output
fw_and_cap_fn_flattened = tvm.relay.Function(fw_and_cap_fn.params, tvm.relay.Tuple(list(fw_and_cap_fn.body.fields[0].fields) # or single tensor
+ list(fw_and_cap_fn.body.fields[1].fields)))
else:
# single tensor output
fw_and_cap_fn_flattened = tvm.relay.Function(fw_and_cap_fn.params, tvm.relay.Tuple([fw_and_cap_fn.body.fields[0]]
+ list(fw_and_cap_fn.body.fields[1].fields)))
target = 'rocm -model=gfx906'
target_host = 'llvm'
ctx = tvm.context(target)
fw_and_cap_mod = tvm.IRModule({"main": fw_and_cap_fn_flattened})
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = tvm.relay.build(fw_and_cap_mod,
target=target,
target_host=target_host,
params={})
fw_and_cap_compiled_module = tvm.contrib.graph_runtime.create(graph, lib, ctx)
fw_and_cap_compiled_module.set_input(**params)
gr_only_mod = tvm.IRModule({"main": gr_only_fn})
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = tvm.relay.build(gr_only_mod,
target=target,
target_host=target_host,
params={})
gr_only_compiled_module = tvm.contrib.graph_runtime.create(graph, lib, ctx)
gr_only_compiled_module.set_input(**params) # we do have funny const tensors from TVM :/
def tensor_to_tvm(t):
return tvm.nd.from_dlpack(torch.utils.dlpack.to_dlpack(t))
def tensor_from_tvm(a):
return(torch.utils.dlpack.from_dlpack(a.to_dlpack()))
debug_wrap.wrapped.cuda()
traced_module.cuda()
model_params_tvm = {k: tensor_to_tvm(v) for k, v in debug_wrap.wrapped.state_dict().items()}
inp_c = [i.cuda() for i in debug_wrap.DEBUG_INP[:2]]
inp_tvm = [tensor_to_tvm(i) for i in inp_c]
dropout_info
torch.manual_seed(12345)
drop_c = {}
for k in dropout_info.keys(): # we don't know the order
p, typ = dropout_info[k]
drop_c[k] = torch.nn.functional.dropout(torch.ones([int(i) for i in typ.shape],
dtype=getattr(torch, typ.dtype), device="cuda"), p=p)*(1-p)
drop_tvm = {n: tensor_to_tvm(t) for n, t in drop_c.items()}
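# Note: F.dropout scales the kept entries by 1/(1-p), so multiplying by (1-p)
# above recovers a plain 0/1 Bernoulli mask; the TVM graph re-applies the
# 1/(1-p) scaling itself (see ExternalizeDropout).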
fw_and_cap_compiled_module.set_input('input', inp_tvm[0])
fw_and_cap_compiled_module.set_input('attention_mask', inp_tvm[1])
fw_and_cap_compiled_module.set_input(**model_params_tvm)
fw_and_cap_compiled_module.set_input(**drop_tvm)
fw_and_cap_compiled_module.run()
torch.manual_seed(12345)
debug_wrap.wrapped.train()
numpy.abs(fw_and_cap_compiled_module.get_output(0).asnumpy()-debug_wrap.wrapped(*inp_c)[0].detach().cpu().numpy()).max()
gr_out_c = torch.randn(debug_wrap.DEBUG_OUT[0].shape, device="cuda", dtype=debug_wrap.DEBUG_OUT[0].dtype)
num_captures = len(capture_vars)
num_regular_outputs = len(fw_and_cap_fn_flattened.body.fields) - num_captures
captured_values = {v.name_hint: fw_and_cap_compiled_module.get_output(num_regular_outputs + i) for i, v in enumerate(capture_vars)}
#gr_only_compiled_module.set_input('input', inp_tvm[0])
#gr_only_compiled_module.set_input('attention_mask', inp_tvm[1])
gr_only_compiled_module.set_input(**drop_tvm)
gr_only_compiled_module.set_input(**model_params_tvm)
gr_only_compiled_module.set_input(**captured_values)
gr_only_compiled_module.set_input('gr:out:0', tensor_to_tvm(gr_out_c))
gr_only_compiled_module.run()
torch.manual_seed(12345)
debug_wrap.wrapped.train()
inp_c_rq = [i.requires_grad_() for i in inp_c]
for p in debug_wrap.wrapped.parameters():
p.requires_grad_()
res = debug_wrap.wrapped(*inp_c_rq)[0]
grads_pt = torch.autograd.grad(res, inp_c_rq + list(debug_wrap.wrapped.parameters()), gr_out_c, allow_unused=True)
for i, g_pt in enumerate(grads_pt):
print(numpy.abs(gr_only_compiled_module.get_output(i).asnumpy() - g_pt.cpu().numpy()).max())
fw_input_names = [p.name_hint for p in fw_and_cap_fn_flattened.params if not is_aux_input(p)]
input_to_idx = {n:i for i, n in enumerate(fw_input_names)}
inputs_to_keep_idx = [input_to_idx[i.name_hint] for i in inputs_to_keep]
class TVMFunction(torch.autograd.Function):
# nb. using the modules is not thread safe...
@staticmethod
def forward(ctx, *inputs):
assert len(inputs) == len(fw_input_names)
assert all([i.is_cuda for i in inputs])
drop_c = {}
for k in dropout_info.keys(): # we don't know the order
p, typ = dropout_info[k]
drop_c[k] = torch.nn.functional.dropout(torch.ones([int(i) for i in typ.shape],
dtype=getattr(torch, typ.dtype), device="cuda"), p=p)*(1-p)
# we don't need to worry about PyTorch changing these because they're not visible.
# so we don't need save_for_backward here
drop_tvm = {n: tensor_to_tvm(t) for n, t in drop_c.items()}
ctx.drop_tvm = drop_tvm
fw_and_cap_compiled_module.set_input(**drop_tvm)
inputs_tvm = [tensor_to_tvm(t) for t in inputs]
for n, i in zip(fw_input_names, inputs_tvm):
fw_and_cap_compiled_module.set_input(n, i)
fw_and_cap_compiled_module.run()
if isinstance(output_type, tvm.relay.TensorType):
res = tensor_from_tvm(fw_and_cap_compiled_module.get_output(0))
num_outputs = 1
else:
res = tuple(tensor_from_tvm(fw_and_cap_compiled_module.get_output(i))
for i in range(len(output_type.fields)))
num_outputs = len(res)
ctx.save_for_backward(*([inputs[i] for i in inputs_to_keep_idx]
+[tensor_from_tvm(fw_and_cap_compiled_module.get_output(i))
for i in range(num_outputs, fw_and_cap_compiled_module.get_num_outputs())]))
return res
@staticmethod
def backward(ctx, *grad_outs):
saved = ctx.saved_tensors
kept_inputs = {fw_input_names[i]: tensor_to_tvm(t)
for i, t in zip(inputs_to_keep_idx, saved[:len(inputs_to_keep_idx)])}
gr_only_compiled_module.set_input(**kept_inputs)
captures = {f'input:captures:{i}': tensor_to_tvm(t) for i, t in enumerate(saved[len(kept_inputs):])}
gr_only_compiled_module.set_input(**captures)
grad_outs_tvm = {f"gr:out:{i}": tensor_to_tvm(go) for i, go in enumerate(grad_outs)}
gr_only_compiled_module.set_input(**grad_outs_tvm)
gr_only_compiled_module.set_input(**ctx.drop_tvm)
gr_only_compiled_module.run()
grad_in = [tensor_from_tvm(gr_only_compiled_module.get_output(i)) for i in range(gr_only_compiled_module.get_num_outputs())]
return tuple(grad_in)
def tvm_fn(*inputs):
return TVMFunction.apply(*inputs)
tvm_fn.__signature__ = inspect.signature(tvm_fn).replace(
parameters=[inspect.Parameter(n.replace('.', '__'), inspect.Parameter.POSITIONAL_ONLY)
for n in fw_input_names])
inp_all = (inp_c_rq + list(traced_module.parameters()))
torch.manual_seed(12345)
res_tvm = tvm_fn(*inp_all)
grad_outs = tuple(torch.randn_like(r) for r in res_tvm)
grads_tvm = torch.autograd.grad(res_tvm, inp_all, grad_outs)
assert len(grads_tvm) == len(grads_pt)
list((g1-g2).abs().max().item() for g1, g2 in zip(grads_tvm, grads_pt))
def create_tvm_function_from_traced_module(traced_module):
    assert traced_module.training, "We only do training right now"
dt = next(traced_module.parameters()).dtype.__str__().split('.')[-1]
shape_list = [(i.debugName().split('.')[0], i.type().sizes()) for i in list(traced_module.graph.inputs())[1:]]
mod, mod_params = tvm.relay.frontend.pytorch.from_pytorch(traced_module, shape_list, default_dtype=dt)
# the converter will output arguments in an arbitrary order (well, by position of use), we want that of the input
fn = mod['main']
# Careful traced module's vs. non-traced module's parameter ordering.
# Anecdotally, I have not seen orderings differ between the two, though.
arg_order = ([n for n, _ in shape_list]
+[n for n, _ in traced_module.named_parameters()])
tmp_arg_idx = {p.name_hint: i for i, p in enumerate(fn.params)}
fn = tvm.relay.Function([fn.params[tmp_arg_idx[n]] for n in arg_order], fn.body)
fn = TransposeDedupMutator().visit(fn)
# prepare function to also use grad_out
fn = infer_type(fn)
output_type = fn.body.checked_type # fn.ret_type :)
if isinstance(output_type, tvm.relay.TensorType):
gr_out = tvm.relay.var("gr:out", output_type)
fn_for_gr = tvm.relay.Function(list(fn.params) + [gr_out], tvm.relay.sum(fn.body * gr_out))
else:
# we can try to handle tuples of tensors, but our nesting patience ends there
assert (isinstance(output_type, tvm.relay.TupleType) and
all([isinstance(f, tvm.relay.TensorType) for f in output_type.fields]))
gr_outs = [tvm.relay.var(f"gr:out:{i}", t) for i, t in enumerate(output_type.fields)]
prods_with_gr_out = [tvm.relay.sum(tvm.relay.TupleGetItem(fn.body, i) * go_i)
for i, go_i in enumerate(gr_outs)]
s = prods_with_gr_out[0]
for p in prods_with_gr_out[1:]:
s = s + p
fn_for_gr = tvm.relay.Function(list(fn.params) + gr_outs, s)
fn_for_gr = infer_type(fn_for_gr)
fn_for_gr = tvm.relay.dataflow_pattern.rewrite(DecomposeLayerNorm(), fn_for_gr)
fn_for_gr = infer_type(fn_for_gr)
fn_for_gr, dropout_info = externalize_dropout(fn_for_gr)
fn_for_gr = infer_type(fn_for_gr)
# take the gradient
grfn = tvm.relay.transform.gradient(fn_for_gr, mode='first_order')
grfn = to_graph_normal_form(grfn)
# removing of unneeded outputs and simplifications of the gradient
# Now we have (sum(orig_out * grad_out), (grad_inp_1, ..., grad_inp_n, grad_grad_out, gr_dropout ...))
# but we only want orig_out and grad_inp_1, ..., grad_inp_n
def is_aux_input(p):
return p.name_hint.startswith('dropout:') or p.name_hint.startswith('gr:out:')
# the gr_out and dropout parameters will have gradients computed, but we do not want that
grads_to_keep = tvm.relay.Tuple([g for p, g in zip(grfn.params, grfn.body.fields[1].fields)
if not is_aux_input(p)])
assert grfn.body.fields[0].op.name == 'sum'
assert grfn.body.fields[0].args[0].op.name == 'multiply'
if isinstance(output_type, tvm.relay.TensorType):
orig_out = grfn.body.fields[0].args[0].args[0]
else:
assert isinstance(output_type, tvm.relay.TupleType)
orig_out = grfn.body.fields[0].args[0].args[0].tuple_value
out_and_grad = tvm.relay.Tuple([orig_out, grads_to_keep])
out_and_grad_fn = tvm.relay.Function(grfn.params, out_and_grad)
out_and_grad_fn = infer_type(out_and_grad_fn)
out_and_grad_fn = dead_code_elimination(out_and_grad_fn)
out_and_grad_fn = eliminate_common_subexpr(out_and_grad_fn)
out_and_grad_fn = infer_type(out_and_grad_fn)
out_and_grad_fn = tvm.relay.dataflow_pattern.rewrite(LikeZapp(), out_and_grad_fn)
out_and_grad_fn = infer_type(out_and_grad_fn)
out_and_grad_fn = tvm.relay.dataflow_pattern.rewrite(ZeroZapp(), out_and_grad_fn)
out_and_grad_fn = infer_type(out_and_grad_fn)
out_and_grad_fn = tvm.relay.dataflow_pattern.rewrite(OneZapp(), out_and_grad_fn)
out_and_grad_fn = infer_type(out_and_grad_fn)
out_and_grad_fn = tvm.relay.dataflow_pattern.rewrite(OneZapp(), out_and_grad_fn)
out_and_grad_fn = infer_type(out_and_grad_fn)
out_and_grad_fn = dead_code_elimination(out_and_grad_fn)
out_and_grad_fn = eliminate_common_subexpr(out_and_grad_fn)
# split the graph into forward and backward
orig_out = out_and_grad_fn.body.fields[0]
grad_ins = out_and_grad_fn.body.fields[1]
color_dict = {}
def color(n, c):
if n in color_dict:
return
color_dict[n] = c
for a in getattr(n, 'args', []):
color(a, c)
for a in getattr(n, 'fields', []):
color(a, c)
for nam in ('body', 'tuple_value'):
b = getattr(n, nam, None)
if b is not None:
color(b, c)
color(orig_out, {'color': 'red'})
seen = set()
def color_crossings(n, c):
if n in seen:
return
seen.add(n)
if n in color_dict:
color_dict[n] = c
return
for a in getattr(n, 'args', []):
color_crossings(a, c)
for a in getattr(n, 'fields', []):
color_crossings(a, c)
for nam in ('body', 'tuple_value'):
b = getattr(n, nam, None)
if b is not None:
color_crossings(b, c)
color_crossings(grad_ins, {'color': 'blue'})
nodes_to_capture = [n for n, v in color_dict.items()
if v['color'] == 'blue' and not isinstance(n, (tvm.relay.Constant, tvm.relay.Var))]
capture_tup = tvm.relay.Tuple(nodes_to_capture)
nodes_to_capture_idx = {n:i for i, n in enumerate(nodes_to_capture)}
capture_vars = [tvm.relay.var(f"input:captures:{i}", type_annotation=nodes_to_capture[i].checked_type)
for i, n in enumerate(nodes_to_capture)]
grads_in = out_and_grad_fn.body.fields[1]
needed_vars = set()
class GradientOnlyMutator(tvm.relay.ExprMutator):
def __init__(self):
super().__init__()
def visit_var(self, var):
needed_vars.add(var)
return var
def visit(self, expr):
if expr in nodes_to_capture_idx:
return capture_vars[nodes_to_capture_idx[expr]]
return super().visit(expr)
grads_in_only = GradientOnlyMutator().visit(grads_in)
# TODO: check against output of original
fn_for_gr_input_names = {p.name_hint for p in fn_for_gr.params}
needed_var_names = {v.name_hint for v in needed_vars}
gr_only_fn = tvm.relay.Function(sorted(needed_vars) + capture_vars, grads_in_only)
assert needed_var_names <= fn_for_gr_input_names
inputs_to_keep = [n for n in needed_vars if not is_aux_input(n)]
# build the forward function that also returns the data for the backward
capture_tup = tvm.relay.Tuple([n for n in nodes_to_capture])
fw_and_cap_params = [p for p in out_and_grad_fn.params if not p.name_hint.startswith('gr:out:')]
fw_and_cap_fn = tvm.relay.Function(fw_and_cap_params,
tvm.relay.Tuple((out_and_grad_fn.body.fields[0],) + (capture_tup,)))
if isinstance(fn.body, tvm.relay.Tuple):
# tuple of tensors output
fw_and_cap_fn_flattened = tvm.relay.Function(fw_and_cap_fn.params, tvm.relay.Tuple(list(fw_and_cap_fn.body.fields[0].fields) # or single tensor
+ list(fw_and_cap_fn.body.fields[1].fields)))
else:
# single tensor output
fw_and_cap_fn_flattened = tvm.relay.Function(fw_and_cap_fn.params, tvm.relay.Tuple([fw_and_cap_fn.body.fields[0]]
+ list(fw_and_cap_fn.body.fields[1].fields)))
target = 'rocm -model=gfx906'
target_host = 'llvm'
ctx = tvm.context(target)
fw_and_cap_mod = tvm.IRModule({"main": fw_and_cap_fn_flattened})
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = tvm.relay.build(fw_and_cap_mod,
target=target,
target_host=target_host,
params={})
fw_and_cap_compiled_module = tvm.contrib.graph_runtime.create(graph, lib, ctx)
fw_and_cap_compiled_module.set_input(**params)
gr_only_mod = tvm.IRModule({"main": gr_only_fn})
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = tvm.relay.build(gr_only_mod,
target=target,
target_host=target_host,
params={})
gr_only_compiled_module = tvm.contrib.graph_runtime.create(graph, lib, ctx)
gr_only_compiled_module.set_input(**params) # we may have funny const tensors from TVM
fw_input_names = [p.name_hint for p in fw_and_cap_fn_flattened.params if not is_aux_input(p)]
input_to_idx = {n:i for i, n in enumerate(fw_input_names)}
inputs_to_keep_idx = [input_to_idx[i.name_hint] for i in inputs_to_keep]
class TVMFunction(torch.autograd.Function):
# nb. using the compiled_modules is not thread safe...
@staticmethod
def forward(ctx, *inputs):
assert len(inputs) == len(fw_input_names)
assert all([i.is_cuda for i in inputs])
drop_c = {}
for k in dropout_info.keys(): # we don't know the order
p, typ = dropout_info[k]
drop_c[k] = torch.nn.functional.dropout(torch.ones([int(i) for i in typ.shape],
dtype=getattr(torch, typ.dtype), device="cuda"), p=p)*(1-p)
# we don't need to worry about PyTorch changing these because they're not visible.
# so we don't need save_for_backward here
drop_tvm = {n: tensor_to_tvm(t) for n, t in drop_c.items()}
ctx.drop_tvm = drop_tvm
fw_and_cap_compiled_module.set_input(**drop_tvm)
inputs_tvm = [tensor_to_tvm(t) for t in inputs]
for n, i in zip(fw_input_names, inputs_tvm):
fw_and_cap_compiled_module.set_input(n, i)
fw_and_cap_compiled_module.run()
if isinstance(output_type, tvm.relay.TensorType):
res = tensor_from_tvm(fw_and_cap_compiled_module.get_output(0))
num_outputs = 1
else:
res = tuple(tensor_from_tvm(fw_and_cap_compiled_module.get_output(i))
for i in range(len(output_type.fields)))
num_outputs = len(res)
ctx.save_for_backward(*([inputs[i] for i in inputs_to_keep_idx]
+[tensor_from_tvm(fw_and_cap_compiled_module.get_output(i))
for i in range(num_outputs, fw_and_cap_compiled_module.get_num_outputs())]))
return res
@staticmethod
def backward(ctx, *grad_outs):
saved = ctx.saved_tensors
kept_inputs = {fw_input_names[i]: tensor_to_tvm(t)
for i, t in zip(inputs_to_keep_idx, saved[:len(inputs_to_keep_idx)])}
gr_only_compiled_module.set_input(**kept_inputs)
captures = {f'input:captures:{i}': tensor_to_tvm(t) for i, t in enumerate(saved[len(kept_inputs):])}
gr_only_compiled_module.set_input(**captures)
grad_outs_tvm = {f"gr:out:{i}": tensor_to_tvm(go) for i, go in enumerate(grad_outs)}
gr_only_compiled_module.set_input(**grad_outs_tvm)
gr_only_compiled_module.set_input(**ctx.drop_tvm)
gr_only_compiled_module.run()
grad_in = [tensor_from_tvm(gr_only_compiled_module.get_output(i)) for i in range(gr_only_compiled_module.get_num_outputs())]
return tuple(grad_in)
def tvm_fn(*inputs):
return TVMFunction.apply(*inputs)
tvm_fn.__signature__ = inspect.signature(tvm_fn).replace(
parameters=[inspect.Parameter(n.replace('.', '__'), inspect.Parameter.POSITIONAL_ONLY)
for n in fw_input_names])
return tvm_fn
tvm_fn = create_tvm_function_from_traced_module(traced_module)
inp_all = (inp_c_rq + list(traced_module.parameters()))
torch.manual_seed(12345)
res_tvm = tvm_fn(*inp_all)
grad_outs = tuple(torch.randn_like(r) for r in res_tvm)
grads_tvm = torch.autograd.grad(res_tvm, inp_all, grad_outs)
torch.manual_seed(12345)
res_pt = traced_module(*inp_c_rq)
grads_pt = torch.autograd.grad(res_pt, inp_all, grad_outs)
assert len(res_tvm) == len(res_pt) and len(grads_tvm) == len(grads_pt)
(list((r1-r2).abs().max().item() for r1, r2 in zip(res_tvm, res_pt)),
list((g1-g2).abs().max().item() for g1, g2 in zip(grads_tvm, grads_pt)))
def add_tvm_dispatch(module, sample_inputs):
    traced_module = torch.jit.trace(module, sample_inputs)
tvm_fn = create_tvm_function_from_traced_module(traced_module)
tvm_input_shapes = [(i.shape, i.dtype, i.device) for i in sample_inputs]
old_forward = module.forward
old_remove_tvm_dispatch = getattr(module, 'remove_tvm_dispatch', None)
def forward(self, *inputs):
input_shapes = [(i.shape, i.dtype, i.device) for i in inputs]
if tvm_input_shapes != input_shapes:
res = old_forward(*inputs)
else:
inp_all = inputs + tuple(self.parameters())
res = tvm_fn(*inp_all)
return res
def remove_tvm_dispatch(self):
self.forward = old_forward
if old_remove_tvm_dispatch is not None:
self.remove_tvm_dispatch = old_remove_tvm_dispatch
module.remove_tvm_dispatch = types.MethodType(remove_tvm_dispatch, module)
module.forward = types.MethodType(forward, module)
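# Design note: this monkey-patches `forward` on the instance (not the class), so
# the TVM path only triggers for the exact input shapes the module was traced
# with; any other shape falls back to the original PyTorch forward.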
module = debug_wrap.wrapped
inp_c2 = [torch.cat([i, i], dim=0) for i in inp_c] # batch size 2 will be new
type(module)
add_tvm_dispatch(module, inp_c)
module.forward(*inp_c)
module(*inp_c2) # different shape
module.remove_tvm_dispatch() # cleaning up
tasks1 = tvm.autotvm.task.extract_from_program(fw_and_cap_fn_flattened, target=target, params=params)
tasks2 = tvm.autotvm.task.extract_from_program(gr_only_mod["main"], target=target, params=params)
log_filename = 'bert-train-0.log'
n_trial = 20 # for real tuning, make this 2000!
def do_tune(tasks, log_filename):
tmp_log_file = log_filename + ".tmp"
for i, tsk in enumerate(reversed(tasks)):
prefix = "[Task %2d/%2d] " %(i+1, len(tasks))
# we use threading and tornado here to work around TVM and Jupyter colliding over IOLoops
# In a regular python command line, you should be able to just call the tuner...
import threading
import tornado
# create tuner
tuner = tvm.autotvm.tuner.XGBTuner(tsk, loss_type='rank')
if os.path.isfile(tmp_log_file):
tuner.load_history(tvm.autotvm.record.load_from_file(tmp_log_file))
# do tuning
tsk_trial = min(n_trial, len(tsk.config_space))
def tune_task_fn():
iol = tornado.ioloop.IOLoop() # we need an event loop
tuner.tune(
n_trial=n_trial,
early_stopping=600,
measure_option=tvm.autotvm.measure_option(
builder=tvm.autotvm.LocalBuilder(timeout=10),
runner=tvm.autotvm.LocalRunner(number=20, repeat=3, timeout=4, min_repeat_ms=150)),
callbacks=[
tvm.autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
tvm.autotvm.callback.log_to_file(tmp_log_file)
])
tuning_thread = threading.Thread(target=tune_task_fn) # create a thread start it and wait on it
tuning_thread.start()
tuning_thread.join()
# done tuning, on to the next task
# pick best records to a cache file
tvm.autotvm.record.pick_best(tmp_log_file, log_filename)
#do_tune(tasks1+tasks2, log_filename)
with tvm.autotvm.apply_history_best(log_filename):
tvm_fn = create_tvm_function_from_traced_module(traced_module)
def x():
for i in range(100):
res_tvm = tvm_fn(*inp_all)
grads_tvm = torch.autograd.grad(res_tvm, inp_all, grad_outs)
ctx.sync()
x()
%timeit x()
def x():
for i in range(100):
res_pt = traced_module(*inp_c_rq)
grads_pt = torch.autograd.grad(res_pt, inp_all, grad_outs)
torch.cuda.synchronize()
x()
%timeit x()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Correct coefficients
Step2: Our code - HMC
Step3: Our code - Gradient descent
Step5: Cliburn's code
|
<ASSISTANT_TASK:>
Python Code:
# Load data
X = np.concatenate((np.ones((pima.shape[0],1)),pima[:,0:8]), axis=1)
Y = pima[:,8]
Xs = (X - np.mean(X, axis=0))/np.concatenate((np.ones(1),np.std(X[:,1:], axis=0)))
n, p = X.shape
nsample = 1
nbatch = 768
M = np.identity(p)
C = 0 * np.identity(p)
eps = 0.1
m = 10
V = 0 * np.identity(p)
theta = np.zeros(p)
### HMC version
def logistic(x):
return 1/(1+np.exp(-x))
def U(theta, Y, X):
return - (Y.T @ X @ theta - np.sum(np.log(1+np.exp(X @ theta))) - 0.5 * phi * np.sum(theta**2))
def gradU(theta, Y, X, nbatch):
'''A function that returns the stochastic gradient. Adapted from Eq. 5.
Inputs are:
theta, the parameters
Y, the response
X, the covariates
nbatch, the number of samples to take from the full data
'''
n = X.shape[0]
Y_pred = logistic(X @ theta)
epsilon = (Y[:,np.newaxis] - Y_pred[:,np.newaxis])
grad = X.T @ epsilon - phi * theta[:, np.newaxis]
return -grad/n
#temp = -grad/n
#return temp / np.linalg.norm(temp)
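# Caveat: despite its name, the `nbatch` argument is currently unused -- the
# gradient above is computed on the full data, so this is plain HMC rather
# than a stochastic-gradient variant.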
def hmc(Y, X, gradU, M, eps, m, theta, C, V):
theta0 = theta.copy()
# This is just HMC for testing
n = X.shape[0]
p = X.shape[1]
# Precompute
Minv = np.linalg.inv(M)
# Randomly sample momentum
r = np.random.multivariate_normal(np.zeros(p),M)[:,np.newaxis]
# Intial energy
H0 = U(theta, Y, X) + 0.5 * np.asscalar(r.T @ Minv @ r)
# Hamiltonian dynamics
r = r - (eps/2)*gradU(theta, Y, X, nbatch)
for i in range(m):
theta = theta + (eps*Minv@r).ravel()
r = r - eps*gradU(theta, Y, X, nbatch)
theta = theta + (eps*Minv@r).ravel()
r = r - (eps/2)*gradU(theta, Y, X, nbatch)
# Final energy
H1 = U(theta, Y, X) + np.asscalar(0.5 * r.T @ Minv @ r)
# MH step
u = np.random.uniform()
#rho = np.exp(H1 - H0)
rho = np.exp(H0 - H1)
#print('(H0, H1, rho): %s,%s,%s' % (H0, H1, rho))
if u < np.min((1, rho)):
return theta.copy()
else:
return theta0.copy() # reject
def my_gd(Y, X, gradU, M, eps, m, theta, C, V):
# gradient descent
n = X.shape[0]
p = X.shape[1]
for i in range(m):
theta = theta - eps*gradU(theta, Y, X, nbatch).ravel()
return theta
from sklearn.linear_model import LogisticRegression
# Unscaled
mod_logis = LogisticRegression(fit_intercept=False, C=1e50)
mod_logis.fit(X,Y)
beta_true_unscale = mod_logis.coef_.ravel()
beta_true_unscale
# Scaled
mod_logis = LogisticRegression(fit_intercept=False, C=1e50)
mod_logis.fit(Xs,Y)
beta_true_scale = mod_logis.coef_.ravel()
beta_true_scale
U(np.ones(p)*.1,Y,X)
phi = 1
gradU(np.ones(p)*.1, Y, X, 1)*n
# HMC - Unscaled
nsample = 10000
m = 20
eps = .0001
#theta = np.zeros(p)
theta = beta_true_unscale.copy()
phi = 0.01
np.random.seed(2)
samples = np.zeros((nsample, p))
u = np.zeros(nsample)
for i in range(nsample):
theta = hmc(Y, X, gradU, M, eps, m, theta, C, V)
samples[i] = theta
u[i] = U(theta, Y, X)
np.mean(samples, axis=0) - beta_true_unscale
plt.plot((samples - beta_true_unscale)[:,4])
plt.show()
plt.plot(u)
plt.show()
beta_true_unscale
# HMC - Scaled
nsample = 10000
m = 20
eps = .001
theta = np.zeros(p)
#theta = beta_true_scale.copy()
phi = 0.1
np.random.seed(2)
samples = np.zeros((nsample, p))
u = np.zeros(nsample)
for i in range(nsample):
theta = hmc(Y, Xs, gradU, M, eps, m, theta, C, V)
samples[i] = theta
u[i] = U(theta, Y, Xs)
np.mean(samples, axis=0) - beta_true_scale
plt.plot((samples - beta_true_scale)[:,1])
plt.show()
plt.plot(u)
plt.show()
# HMC - Scaled (no intercept)
nsample = 10000
m = 20
eps = .001
theta = np.zeros(p-1)
#theta = beta_true_scale.copy()[1:]
phi = 1
np.random.seed(2)
samples = np.zeros((nsample, p-1))
u = np.zeros(nsample)
for i in range(nsample):
theta = hmc(Y, Xs[:,1:], gradU, np.identity(p-1), eps, m, theta, C, V)
samples[i] = theta
u[i] = U(theta, Y, Xs[:,1:])
np.mean(samples, axis=0) - beta_true_scale[1:]
plt.plot((samples - beta_true_scale[1:])[:,5])
plt.show()
plt.plot(u)
plt.show()
# Gradient descent - Unscaled
np.random.seed(2)
#res = my_gd(Y, X, gradU, M, .0001, 10000, np.zeros(p), C, V) # Starting at zero
#res = my_gd(Y, X, gradU, M, .0001, 10000, beta_true_unscale.copy(), C, V) # Starting at true values
res = my_gd(Y, X, gradU, M, .0001, 10000, beta_true_unscale.copy(), C, V) # Starting at true values
res - beta_true_unscale
# Gradient descent - Scaled
np.random.seed(2)
res = my_gd(Y, Xs, gradU, M, .1, 20000, np.zeros(p), C, V)
res - beta_true_scale
# Cliburn's gradient descent code
def gd(X, y, beta, alpha, niter):
Gradient descent algorihtm.
n, p = X.shape
Xt = X.T
for i in range(niter):
y_pred = logistic(X @ beta)
epsilon = y - y_pred
grad = Xt @ epsilon / n
beta += alpha * grad
return beta
# Unscaled
#res = gd(X, Y.ravel(), np.zeros(p), alpha=.1, niter=2) # Starting at zero
res = gd(X, Y.ravel(), beta_true_unscale.copy(), alpha=.0001, niter=10000) # Starting at true coefficients
res - beta_true_unscale
# Scaled
res = gd(Xs, Y.ravel(), np.zeros(p), alpha=.1, niter=20000)
res - beta_true_scale
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Try Plotting a Dirichlet Distribution
Step4: Make an Interactive 3D Plot with Parameter Selection
|
<ASSISTANT_TASK:>
Python Code:
trace0 = go.Scatter(
x=[1, 2, 3, 4],
y=[10, 15, 13, 17]
)
trace1 = go.Scatter(
x=[1, 2, 3, 4],
y=[16, 5, 11, 9]
)
data = go.Data([trace0, trace1])
py.iplot(data, filename = 'basic-line')
alpha = np.array([5, 5, 5])
rv = st.dirichlet(alpha)
coord_step = 0.01
coord_range = np.arange(coord_step, 1, coord_step)
xv, yv = np.meshgrid(coord_range, coord_range)
figure(figsize=(12, 10))
sns.heatmap(xv)
figure(figsize=(12, 10))
sns.heatmap(yv)
zv = 1.0 - xv - yv
zv = np.where(zv > 0.0, zv, np.nan)
figure(figsize=(12, 10))
sns.heatmap(zv)
dv = rv.pdf(np.array((xv, yv, zv)))
figure(figsize=(12, 10))
sns.heatmap(dv, vmin=0, vmax=10)
pd.Series(dv.flatten()).describe()
text = [["pdf: %.2f" % d for d in dv[i]] for i in range(dv.shape[0])]
len(text), len(text[0])
data = [
go.Surface(
x=xv,
y=yv,
z=zv,
surfacecolor=dv,
hoverinfo="x+y+z+text",
text=text,
name="Dirichlet(%.2f, %.2f, %.2f)" % tuple(alpha)
)
]
layout = go.Layout(
title='Dirichlet Demo',
autosize=False,
width=800,
height=600,
margin=dict(
l=65,
r=50,
b=65,
t=90
)
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='dirichlet_demo')
def generate_dirichlet_surface(alpha, coord_step=0.01, visible=False, max_pdf=20):
Generate a plotly 3D surface for standard simplex at granularity coord_step, with colors
mapped to Dirichlet(alpha) pdf values. Filter out pts with pdf values exceeding max_pdf.
rv = st.dirichlet(alpha)
coord_range = np.arange(coord_step, 1, coord_step)
xv, yv = np.meshgrid(coord_range, coord_range)
zv = 1.0 - xv - yv
zv = np.where(zv > 0.0, zv, np.nan)
dv = rv.pdf(np.array((xv, yv, zv)))
zv = np.where(dv > max_pdf, np.nan, zv)
dv = np.where(dv > max_pdf, np.nan, dv)
text = [["pdf: %.4f" % d for d in dv[i]] for i in range(dv.shape[0])]
name = "%.1f %.1f %.1f" % tuple(alpha)
res = go.Surface(
x=xv,
y=yv,
z=zv,
surfacecolor=dv,
cmin=0.0,
#cmax=25.0,
#cauto=False,
colorscale='Viridis',
hoverinfo="name+x+y+z+text",
text=text,
name=name,
visible=visible
)
return res
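# Minimal usage sketch (assumed parameter values, commented out):
# sfc = generate_dirichlet_surface(np.array([2., 2., 2.]), visible=True)
# py.iplot(go.Figure(data=[sfc]), filename='dirichlet_single_surface')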
def generate_dirichlet_interactive_figure(alpha_range=None):
Generate a plotly 3D figure with Dirichlet distributions corresponding to a range
of alpha parameters, selectable from drop-down menus.
if alpha_range is None:
alpha_log_beg=-1
alpha_log_end=1
alpha_log_step=5
alpha_log_base=2
alpha_range = np.logspace(alpha_log_beg, alpha_log_end, alpha_log_step, base=alpha_log_base)
n_range = len(alpha_range)
n = n_range ** 3
alphas = []
sfcs = []
visible=True
visibility = np.eye(n).astype(bool)
group_indices = [[], []]
for group_1 in [True, False]:
for alpha_x in alpha_range:
for alpha_y in alpha_range:
for alpha_z in alpha_range:
all_over_1 = alpha_x >= 1 and alpha_y >= 1 and alpha_z >= 1
non_decreasing = alpha_x <= alpha_y <= alpha_z
if (all_over_1 and group_1) or ((not all_over_1) and non_decreasing and (not group_1)):
group_indices[int(all_over_1)].append(len(alphas))
alpha = np.array([alpha_x, alpha_y, alpha_z])
alphas.append(alpha)
print("Generating surface for alpha: %s (group %d)" % (alpha, group_1))
sfc = generate_dirichlet_surface(alpha, coord_step=0.01, visible=visible)
sfcs.append(sfc)
visible=False
data = go.Data(sfcs)
print("Generating layout & figure")
layout = go.Layout(
title='Dirichlet Distribution Interactive Plot',
legend={'xanchor': 'right'},
autosize=True,
width=800,
height=800,
#scene={'bgcolor': 'rgb(217, 217, 217)'},
updatemenus=[
dict(
x=0,
y=1.0 + k_group * 0.1,
yanchor='top',
font={'size': 10, 'family': 'courier'},
buttons=[
dict(
args=['visible', list(visibility[i])],
label='alpha=(%.1f %.1f %.1f)' % tuple(alphas[i]),
method='restyle'
)
for i in group_indices[k_group]]
)
for k_group in [0, 1]
],
)
fig = go.Figure(data=data, layout=layout)
return fig
fig = generate_dirichlet_interactive_figure(alpha_range=np.array([2, 1, 0.9]))
py.iplot(fig, filename='dirichlet_interactive')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Convert an existing list to an array
Step2: Create arrays
Step3: random
Step4: Values over a range
Step5: | Data type | Description |
Step6: Array attributes
Step7: Arithmetic operations
Step8: | Operator | Equivalent ufunc | Description |
Step9: Aggregations
Step10: Notebook usage tip
Step11: As the code above shows, np.sum is much faster and is the recommended choice
Step12: | Operator | Equivalent ufunc || Operator | Equivalent ufunc |
Step13: Reshaping
Step14: Sorting
Step15: Concatenation
|
<ASSISTANT_TASK:>
Python Code:
import array
a = array.array('i', range(10))
# data types must be consistent (assigning a string here raises a TypeError)
a[1] = 's'
a
import numpy as np
a_list = list(range(10))
b = np.array(a_list)
type(b)
a = np.zeros(10, dtype=int)
print(type(a))
# check the array's data type
a.dtype
a = np.zeros((4,4), dtype=int)
print(type(a))
# check the array's data type
print(a.dtype)
a
np.ones((4,4), dtype=float)
np.full((3,3), 3.14)
a
np.zeros_like(a)
np.ones_like(a)
np.full_like(a, 4.12, dtype=float)
import random  # standard-library random module (needed for the next two calls)
print(random.randint(5,10))
print(random.random())
np.random.random((3,3))
# used very often
np.random.randint(0,10, (5,5))
list(range(0,10,2))
np.arange(0,3,2)
# used often
np.linspace(0, 3, 10)
# n x n identity matrix
np.eye(5)
# element access in a nested list
var = [[1,2,3], [3,4,5], [5,6,7]]
var[0][0]
# element access in an array
a = np.array(var)
a[-1][0]
a
# these two access styles are equivalent
a[2, 0], a[2][0]
# array slicing
a[:2, :2]
# NOT equivalent to the slicing above
a[:2][:2]
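# a[:2][:2] first takes the first two rows, then the first two rows of that
# result again -- so it equals a[:2], not the 2x2 block a[:2, :2].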
a
# 维度
print(a.ndim)
# shape
print(a.shape)
# size
print(a.size)
# dtype
print(a.dtype)
# a.itemsize
print(a.itemsize)
# nbytes
print(a.nbytes)
a = np.array(list(range(10)))
a
print(a + 10)
print(a - 10)
print(a * 100)
a = np.full((3,3), 1.0, dtype=float)
a + 10 # 等价于 np.add(a, 10)
a = np.linspace(0, np.pi, 5)
b = np.sin(a)
print(a)
print(b)
# summation with the built-in sum
print(sum([1,2,3,4,5,6]))
# sum over a 1-D array
a = np.full(10, 2.3)
print(sum(a))
# built-in sum over a 2-D array (sums along axis 0)
a = np.array([[1,2],[3,4]])
print(sum(a))
# summation with np.sum
np.sum(a)
np.sum(a, axis=1)
np.max(a, axis=1)
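# axis=0 aggregates down the rows (one result per column);
# axis=1 aggregates across the columns (one result per row).
print(np.sum(a, axis=0))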
n = np.random.rand(10000)
%timeit sum(n)
%timeit np.sum(n)
a = np.array(range(10))
a
a > 3
a != 3
a == a
np.all(a>-1)
np.any(a>-1)
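# Boolean arrays can be counted directly:
print(np.sum(a > 3))            # number of elements greater than 3
print(np.count_nonzero(a > 3))  # equivalent count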
a = np.full((2,10), 1, dtype=float)
a
a.reshape(4, 5)
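# reshape returns a new array without modifying `a`; the total number of
# elements must stay the same (here 2*10 == 4*5).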
l = [
[1,2,3],
[34,12,4],
[32,2,33]
]
a = np.array(l)
a
np.sort(a)
a.sort(axis=0)
a
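# np.sort(a) returns a sorted copy, while a.sort() sorts in place;
# axis=0 sorts each column, axis=1 (the default) sorts within each row.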
a = np.array([1, 2, 3])
b = np.array([[0, 2, 4], [1, 3, 5]])
# concatenate along rows (axis=0)
np.concatenate([b,b,b], axis=0)
# concatenate along columns (axis=1)
np.concatenate([b,b,b], axis=1)
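# np.vstack and np.hstack are shorthands for the two cases above
np.vstack([b, b])
np.hstack([b, b])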
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Look at the structure of the list
Step2: Let's get to work
|
<ASSISTANT_TASK:>
Python Code:
tweets = []
RUTA = ''
for line in open(RUTA).readlines():
tweets.append(line.split('\t'))
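# Note: each line still ends with '\n', so the last field (the tweet text)
# keeps a trailing newline unless the line is stripped first.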
ultimo_tweet = tweets[-1]
print('id =>', ultimo_tweet[0])
print('date =>', ultimo_tweet[1])
print('author =>', ultimo_tweet[2])
print('text =>', ultimo_tweet[3])
# write your code below
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Solving the 1D acoustic wave equation by finite-differences
Step2: Source time function
Step3: Analytical Solution
Step4: Comparison of numerical with analytical solution
|
<ASSISTANT_TASK:>
Python Code:
# Execute this cell to load the notebook's style sheet, then ignore it
from IPython.core.display import HTML
css_file = '../../style/custom.css'
HTML(open(css_file, "r").read())
# Import Libraries (PLEASE RUN THIS CODE FIRST!)
# ----------------------------------------------
import numpy as np
import matplotlib
# Show Plot in The Notebook
matplotlib.use("nbagg")
import matplotlib.pyplot as plt
# Sub-plot Configuration
# ----------------------
from matplotlib import gridspec
# Ignore Warning Messages
# -----------------------
import warnings
warnings.filterwarnings("ignore")
# Definition of modelling parameters
# ----------------------------------
# NOTE: the values below are assumed defaults (the original cell
# listed only the parameter comments), chosen so the scheme is stable.
nx = 10000           # number of grid points in x-direction
dx = 0.5             # grid point distance in x-direction (m)
vp0 = 333.           # wave speed in medium (m/s)
isrc = int(nx / 2)   # source location in grid in x-direction
ir = isrc + 100      # receiver location in grid in x-direction
nt = 1001            # maximum number of time steps
dt = 0.0010          # time step (s)
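# Stability check (illustrative addition): the explicit 3-point
# scheme used below requires a Courant number vp0*dt/dx <= 1.
eps = vp0 * dt / dx
print('Courant number (should be <= 1):', eps)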
# Plot Source Time Function
# -------------------------
f0 = 25.      # dominant frequency of the source (Hz) -- assumed value
t0 = 4. / f0  # source time shift (s)
print('Source frequency =', f0, 'Hz')
# Source time function (Gaussian)
# -------------------------------
time = np.linspace(0, nt * dt, nt)  # time axis (was missing in the original cell)
# 1st derivative of a Gaussian
src = -2. * (time - t0) * (f0 ** 2) * (np.exp(- (f0 ** 2) * (time - t0) ** 2))
# Plot position configuration
# ---------------------------
plt.ion()
fig1 = plt.figure(figsize=(6, 5))
gs1 = gridspec.GridSpec(1, 2, width_ratios=[1, 1], hspace=0.3, wspace=0.3)
# Plot source time function
# -------------------------
ax1 = plt.subplot(gs1[0])
ax1.plot(time, src) # plot source time function
ax1.set_title('Source Time Function')
ax1.set_xlim(time[0], time[-1])
ax1.set_xlabel('Time (s)')
ax1.set_ylabel('Amplitude')
# Plot source spectrum
# --------------------
ax2 = plt.subplot(gs1[1])
# source time function in frequency domain (fill-in; the original cell was blank)
spec = np.fft.fft(src)                  # time domain to frequency domain
freq = np.fft.fftfreq(spec.size, d=dt)  # corresponding frequency axis
ax2.plot(np.abs(freq), np.abs(spec))    # plot frequency and amplitude
ax2.set_xlim(0, 250)                    # only display frequency from 0 to 250 Hz
ax2.set_title('Source Spectrum')
ax2.set_xlabel('Frequency (Hz)')
ax2.set_ylabel('Amplitude')
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
plt.show()
# Analytical solution
# -------------------
G = time * 0.
# Initialize coordinates
# ----------------------
x = np.arange(nx)
x = x * dx # coordinate in x-direction
for it in range(nt): # Calculate Green's function (Heaviside function)
if (time[it] - np.abs(x[ir] - x[isrc]) / vp0) >= 0:
G[it] = 1. / (2 * vp0)
# Convolve Green's function with the source wavelet (fill-in; was missing)
Gc = np.convolve(G, src * dt)
Gc = Gc[0:nt]
lim = Gc.max()  # get limit value from the maximum amplitude
# Plotting convolution of Green's function with source wavelet
plt.plot(time, Gc)
plt.title("Analytical solution" )
plt.xlabel("Time, s")
plt.ylabel("Amplitude")
plt.grid()
plt.show()
# Plot Snapshot & Seismogram (PLEASE RERUN THIS CODE AGAIN AFTER SIMULATION!)
# ---------------------------------------------------------------------------
# Initialize empty pressure arrays
# --------------------------------
# NOTE: the array initialisations below were missing in the original
# cell and are filled in here (standard choices for this scheme).
p = np.zeros(nx)     # p at time n (now)
pold = np.zeros(nx)  # p at time n-1 (past)
pnew = np.zeros(nx)  # p at time n+1 (future)
d2px = np.zeros(nx)  # 2nd spatial derivative of p

# Initialize model (assume homogeneous model)
# -------------------------------------------
c = np.zeros(nx)
c = c + vp0          # initialize wave velocity in model

# Initialize empty seismogram
# ---------------------------
seis = np.zeros(nt)
# Plot position configuration
# ---------------------------
plt.ion()
fig2 = plt.figure(figsize=(6, 4))
gs2 = gridspec.GridSpec(1, 2, width_ratios=[1, 1], hspace=0.3, wspace=0.3)
# Plot 1D wave propagation
# ------------------------
# Note: comma is needed to update the variable
ax3 = plt.subplot(gs2[0])
leg1,= ax3.plot(isrc, 0, 'r*', markersize=11) # plot position of the source in snapshot
leg2,= ax3.plot(ir, 0, 'k^', markersize=8) # plot position of the receiver in snapshot
up31,= ax3.plot(p) # plot pressure update each time step
ax3.set_xlim(0, nx)
ax3.set_ylim(-lim, lim)
ax3.set_title('Time Step (nt) = 0')
ax3.set_xlabel('nx')
ax3.set_ylabel('Amplitude')
ax3.legend((leg1, leg2), ('Source', 'Receiver'), loc='upper right', fontsize=10, numpoints=1)
# Plot seismogram
# ---------------
# Note: comma is needed to update the variable
ax4 = plt.subplot(gs2[1])
leg3,= ax4.plot(0,0,'r--',markersize=1) # plot analytical solution marker
leg4,= ax4.plot(0,0,'b-',markersize=1) # plot numerical solution marker
up41,= ax4.plot(time, seis) # update recorded seismogram each time step
up42,= ax4.plot([0], [0], 'r|', markersize=15) # update time step position
ax4.yaxis.tick_right()
ax4.yaxis.set_label_position("right")
ax4.set_xlim(time[0], time[-1])
ax4.set_title('Seismogram')
ax4.set_xlabel('Time (s)')
ax4.set_ylabel('Amplitude')
ax4.legend((leg3, leg4), ('Analytical', 'FD'), loc='upper right', fontsize=10, numpoints=1)
plt.plot(time,Gc,'r--') # plot analytical solution
plt.show()
# 1D Wave Propagation (Finite Difference Solution)
# ------------------------------------------------
# Calculate Partial Derivatives
# -----------------------------
for it in range(nt):
    # FD approximation of spatial derivative by 3 point operator
    # (vectorised fill-in; the original cell left this step blank)
    # ----------------------------------------------------------
    d2px[1:nx - 1] = (p[2:nx] - 2 * p[1:nx - 1] + p[0:nx - 2]) / dx ** 2

    # Explicit Time Extrapolation by 3 point operator
    # -----------------------------------------------
    pnew = 2 * p - pold + c ** 2 * dt ** 2 * d2px

    # Add Source Term at isrc
    # -----------------------
    # Absolute pressure w.r.t analytical solution
    pnew[isrc] = pnew[isrc] + src[it] / dx * dt ** 2

    # Remap Time Levels
    # -----------------
    pold, p = p, pnew

    # Output Seismogram
    # -----------------
    seis[it] = p[ir]
# Update Data for Wave Propagation Plot
# -------------------------------------
idisp = 2 # display frequency
if (it % idisp) == 0:
ax3.set_title('Time Step (nt) = %d' % it)
up31.set_ydata(p)
up41.set_ydata(seis)
up42.set_data(time[it], seis[it])
plt.gcf().canvas.draw()
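# Quantitative comparison (illustrative addition): relative L2 misfit
# between the FD seismogram and the analytical solution.
l2_misfit = np.sqrt(np.sum((seis - Gc) ** 2) / np.sum(Gc ** 2))
print('Relative L2 misfit (FD vs analytical):', l2_misfit)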
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Vertex constants
Step12: AutoML constants
Step13: Hardware Accelerators
Step14: Container (Docker) image
Step15: Tutorial
Step16: Dataset
Step17: Quick peek at your data
Step18: Dataset
Step19: Now save the unique dataset identifier for the Dataset resource instance you created.
Step20: Train the model
Step21: Construct the task requirements
Step22: Now save the unique identifier of the training pipeline you created.
Step23: Get information on a training pipeline
Step24: Deployment
Step25: Model information
Step26: Model deployment for batch prediction
Step27: Make the batch input file
Step28: Compute instance scaling
Step29: Make batch prediction request
Step30: Now get the unique identifier for the batch prediction job you created.
Step31: Get information on a batch prediction job
Step33: Get the predictions with explanations
Step34: Cleaning up
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install -U google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    BUCKET_NAME = "gs://" + PROJECT_ID + "-aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
import time
import google.cloud.aiplatform_v1beta1 as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
# Tabular Dataset type
DATA_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/metadata/tables_1.0.0.yaml"
# Tabular Labeling type
LABEL_SCHEMA = (
"gs://google-cloud-aiplatform/schema/dataset/ioformat/table_io_format_1.0.0.yaml"
)
# Tabular Training task
TRAINING_SCHEMA = "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_tables_1.0.0.yaml"
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_dataset_client():
client = aip.DatasetServiceClient(client_options=client_options)
return client
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_pipeline_client():
client = aip.PipelineServiceClient(client_options=client_options)
return client
def create_job_client():
client = aip.JobServiceClient(client_options=client_options)
return client
clients = {}
clients["dataset"] = create_dataset_client()
clients["model"] = create_model_client()
clients["pipeline"] = create_pipeline_client()
clients["job"] = create_job_client()
for client in clients.items():
print(client)
IMPORT_FILE = "gs://cloud-samples-data/tables/iris_1000.csv"
count = ! gsutil cat $IMPORT_FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $IMPORT_FILE | head
heading = ! gsutil cat $IMPORT_FILE | head -n1
label_column = str(heading).split(",")[-1].split("'")[0]
print("Label Column Name", label_column)
if label_column is None:
raise Exception("label column missing")
TIMEOUT = 90
def create_dataset(name, schema, src_uri=None, labels=None, timeout=TIMEOUT):
start_time = time.time()
try:
if src_uri.startswith("gs://"):
metadata = {"input_config": {"gcs_source": {"uri": [src_uri]}}}
elif src_uri.startswith("bq://"):
metadata = {"input_config": {"bigquery_source": {"uri": [src_uri]}}}
dataset = aip.Dataset(
display_name=name,
metadata_schema_uri=schema,
labels=labels,
metadata=json_format.ParseDict(metadata, Value()),
)
operation = clients["dataset"].create_dataset(parent=PARENT, dataset=dataset)
print("Long running operation:", operation.operation.name)
        result = operation.result(timeout=timeout)
print("time:", time.time() - start_time)
print("response")
print(" name:", result.name)
print(" display_name:", result.display_name)
print(" metadata_schema_uri:", result.metadata_schema_uri)
print(" metadata:", dict(result.metadata))
print(" create_time:", result.create_time)
print(" update_time:", result.update_time)
print(" etag:", result.etag)
print(" labels:", dict(result.labels))
return result
except Exception as e:
print("exception:", e)
return None
result = create_dataset("iris-" + TIMESTAMP, DATA_SCHEMA, src_uri=IMPORT_FILE)
# The full unique ID for the dataset
dataset_id = result.name
# The short numeric ID for the dataset
dataset_short_id = dataset_id.split("/")[-1]
print(dataset_id)
def create_pipeline(pipeline_name, model_name, dataset, schema, task):
dataset_id = dataset.split("/")[-1]
input_config = {
"dataset_id": dataset_id,
"fraction_split": {
"training_fraction": 0.8,
"validation_fraction": 0.1,
"test_fraction": 0.1,
},
}
training_pipeline = {
"display_name": pipeline_name,
"training_task_definition": schema,
"training_task_inputs": task,
"input_data_config": input_config,
"model_to_upload": {"display_name": model_name},
}
try:
pipeline = clients["pipeline"].create_training_pipeline(
parent=PARENT, training_pipeline=training_pipeline
)
print(pipeline)
except Exception as e:
print("exception:", e)
return None
return pipeline
TRANSFORMATIONS = [
{"auto": {"column_name": "sepal_width"}},
{"auto": {"column_name": "sepal_length"}},
{"auto": {"column_name": "petal_length"}},
{"auto": {"column_name": "petal_width"}},
]
PIPE_NAME = "iris_pipe-" + TIMESTAMP
MODEL_NAME = "iris_model-" + TIMESTAMP
task = Value(
struct_value=Struct(
fields={
"target_column": Value(string_value=label_column),
"prediction_type": Value(string_value="classification"),
"train_budget_milli_node_hours": Value(number_value=1000),
"disable_early_stopping": Value(bool_value=False),
"transformations": json_format.ParseDict(TRANSFORMATIONS, Value()),
}
)
)
response = create_pipeline(PIPE_NAME, MODEL_NAME, dataset_id, TRAINING_SCHEMA, task)
# The full unique ID for the pipeline
pipeline_id = response.name
# The short numeric ID for the pipeline
pipeline_short_id = pipeline_id.split("/")[-1]
print(pipeline_id)
def get_training_pipeline(name, silent=False):
response = clients["pipeline"].get_training_pipeline(name=name)
if silent:
return response
print("pipeline")
print(" name:", response.name)
print(" display_name:", response.display_name)
print(" state:", response.state)
print(" training_task_definition:", response.training_task_definition)
print(" training_task_inputs:", dict(response.training_task_inputs))
print(" create_time:", response.create_time)
print(" start_time:", response.start_time)
print(" end_time:", response.end_time)
print(" update_time:", response.update_time)
print(" labels:", dict(response.labels))
return response
response = get_training_pipeline(pipeline_id)
while True:
response = get_training_pipeline(pipeline_id, True)
if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED:
print("Training job has not completed:", response.state)
model_to_deploy_id = None
if response.state == aip.PipelineState.PIPELINE_STATE_FAILED:
raise Exception("Training Job Failed")
else:
model_to_deploy = response.model_to_upload
model_to_deploy_id = model_to_deploy.name
print("Training Time:", response.end_time - response.start_time)
break
time.sleep(60)
print("model to deploy:", model_to_deploy_id)
def list_model_evaluations(name):
response = clients["model"].list_model_evaluations(parent=name)
for evaluation in response:
print("model_evaluation")
print(" name:", evaluation.name)
print(" metrics_schema_uri:", evaluation.metrics_schema_uri)
metrics = json_format.MessageToDict(evaluation._pb.metrics)
for metric in metrics.keys():
print(metric)
print("logloss", metrics["logLoss"])
print("auPrc", metrics["auPrc"])
return evaluation.name
last_evaluation = list_model_evaluations(model_to_deploy_id)
HEADING = "petal_length,petal_width,sepal_length,sepal_width"
INSTANCE_1 = "1.4,1.3,5.1,2.8"
INSTANCE_2 = "1.5,1.2,4.7,2.4"
import tensorflow as tf
gcs_input_uri = BUCKET_NAME + "/test.csv"
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
f.write(HEADING + "\n")
f.write(str(INSTANCE_1) + "\n")
f.write(str(INSTANCE_2) + "\n")
print(gcs_input_uri)
! gsutil cat $gcs_input_uri
MIN_NODES = 1
MAX_NODES = 1
BATCH_MODEL = "iris_batch-" + TIMESTAMP
def create_batch_prediction_job(
display_name,
model_name,
gcs_source_uri,
gcs_destination_output_uri_prefix,
parameters=None,
):
if DEPLOY_GPU:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_type": DEPLOY_GPU,
"accelerator_count": DEPLOY_NGPU,
}
else:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_count": 0,
}
batch_prediction_job = {
"display_name": display_name,
# Format: 'projects/{project}/locations/{location}/models/{model_id}'
"model": model_name,
"model_parameters": json_format.ParseDict(parameters, Value()),
"input_config": {
"instances_format": IN_FORMAT,
"gcs_source": {"uris": [gcs_source_uri]},
},
"output_config": {
"predictions_format": OUT_FORMAT,
"gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
},
"dedicated_resources": {
"machine_spec": machine_spec,
"starting_replica_count": MIN_NODES,
"max_replica_count": MAX_NODES,
},
"generate_explanation": True,
}
response = clients["job"].create_batch_prediction_job(
parent=PARENT, batch_prediction_job=batch_prediction_job
)
print("response")
print(" name:", response.name)
print(" display_name:", response.display_name)
print(" model:", response.model)
try:
print(" generate_explanation:", response.generate_explanation)
except:
pass
print(" state:", response.state)
print(" create_time:", response.create_time)
print(" start_time:", response.start_time)
print(" end_time:", response.end_time)
print(" update_time:", response.update_time)
print(" labels:", response.labels)
return response
IN_FORMAT = "csv"
OUT_FORMAT = "csv" # [csv]
response = create_batch_prediction_job(
BATCH_MODEL, model_to_deploy_id, gcs_input_uri, BUCKET_NAME, None
)
# The full unique ID for the batch job
batch_job_id = response.name
# The short numeric ID for the batch job
batch_job_short_id = batch_job_id.split("/")[-1]
print(batch_job_id)
def get_batch_prediction_job(job_name, silent=False):
response = clients["job"].get_batch_prediction_job(name=job_name)
if silent:
return response.output_config.gcs_destination.output_uri_prefix, response.state
print("response")
print(" name:", response.name)
print(" display_name:", response.display_name)
print(" model:", response.model)
try: # not all data types support explanations
print(" generate_explanation:", response.generate_explanation)
except:
pass
print(" state:", response.state)
print(" error:", response.error)
gcs_destination = response.output_config.gcs_destination
print(" gcs_destination")
print(" output_uri_prefix:", gcs_destination.output_uri_prefix)
return gcs_destination.output_uri_prefix, response.state
predictions, state = get_batch_prediction_job(batch_job_id)
def get_latest_predictions(gcs_out_dir):
    """Get the latest prediction subfolder using the timestamp in the subfolder name."""
folders = !gsutil ls $gcs_out_dir
latest = ""
for folder in folders:
subfolder = folder.split("/")[-2]
if subfolder.startswith("prediction-"):
if subfolder > latest:
latest = folder[:-1]
return latest
while True:
predictions, state = get_batch_prediction_job(batch_job_id, True)
if state != aip.JobState.JOB_STATE_SUCCEEDED:
print("The job has not completed:", state)
if state == aip.JobState.JOB_STATE_FAILED:
raise Exception("Batch Job Failed")
else:
folder = get_latest_predictions(predictions)
! gsutil ls $folder/explanation*.csv
! gsutil cat $folder/explanation*.csv
break
time.sleep(60)
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'test-institute-1', 'sandbox-1', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What are the earliest two films listed in the titles dataframe?
Step2: How many movies have the title "Hamlet"?
Step3: How many movies are titled "North by Northwest"?
Step4: When was the first movie titled "Hamlet" made?
Step5: List all of the "Treasure Island" movies from earliest to most recent.
Step6: How many movies were made in the year 1950?
Step7: How many movies were made in the year 1960?
Step8: How many movies were made from 1950 through 1959?
Step9: In what years has a movie titled "Batman" been released?
Step10: How many roles were there in the movie "Inception"?
Step11: How many roles in the movie "Inception" are NOT ranked by an "n" value?
Step12: But how many roles in the movie "Inception" did receive an "n" value?
Step13: Display the cast of "North by Northwest" in their correct "n"-value order, ignoring roles that did not earn a numeric "n" value.
Step14: Display the entire cast, in "n"-order, of the 1972 film "Sleuth".
Step15: Now display the entire cast, in "n"-order, of the 2007 version of "Sleuth".
Step16: How many roles were credited in the silent 1921 version of Hamlet?
Step17: How many roles were credited in Branagh’s 1996 Hamlet?
Step18: How many "Hamlet" roles have been listed in all film credits through history?
Step19: How many people have played an "Ophelia"?
Step20: How many people have played a role called "The Dude"?
Step21: How many people have played a role called "The Stranger"?
Step22: How many roles has Sidney Poitier played throughout his career?
Step23: How many roles has Judi Dench played?
Step24: List the supporting roles (having n=2) played by Cary Grant in the 1940s, in order by year.
Step25: List the leading roles that Cary Grant played in the 1940s in order by year.
Step26: How many roles were available for actors in the 1950s?
Step27: How many roles were available for actresses in the 1950s?
Step28: How many leading roles (n=1) were available from the beginning of film history through 1980?
Step29: How many non-leading roles were available from the beginning of film history through 1980?
Step30: How many roles through 1980 were minor enough that they did not warrant a numeric "n" rank?
|
<ASSISTANT_TASK:>
Python Code:
titles.shape[0]
titles.sort_values(by='year')[0:2]
titles[titles['title']=='Hamlet'].shape[0]
titles[titles['title']=='North by Northwest'].shape[0]
titles[titles['title']=='Hamlet'].sort_values(by='year')['year'].values[0]
titles[titles['title']=='Treasure Island'].sort_values(by='year')
titles[titles['year']==1950].shape[0]
titles[titles['year']==1960].shape[0]
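# "1950 through 1959" includes 1950 itself, hence >= 1950 and < 1960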
titles[(titles['year']>=1950)&(titles['year']<1960)].shape[0]
print(titles[titles['title']=="Batman"]['year'].values)
cast[(cast['title']=="Inception")&(cast['year']==2010)].shape[0]
sum(cast[(cast['title']=="Inception")&(cast['year']==2010)]['n'].isnull())
sum(cast[(cast['title']=="Inception")&(cast['year']==2010)]['n'].notnull())
cast[cast['title']=='North by Northwest'].dropna().sort_values(by='n')
cast[(cast['title']=='Sleuth')&(cast['year']==1972)].sort_values(by='n')
cast[(cast['title']=='Sleuth')&(cast['year']==2007)].sort_values(by='n')
cast[(cast['title']=='Hamlet')&(cast['year']==1921)].shape[0]
cast[(cast['title']=='Hamlet')&(cast['year']==1996)].shape[0]
cast[(cast['character']=='Hamlet')].shape[0]
cast[(cast['character']=='Ophelia')]['name'].unique().shape[0]
cast[(cast['character']=='The Dude')]['name'].unique().shape[0]
cast[(cast['character']=='The Stranger')]['name'].unique().shape[0]
cast[(cast['name']=='Sidney Poitier')]['character'].unique().shape[0]
cast[(cast['name']=='Judi Dench')]['character'].unique().shape[0]
cast[(cast['name']=='Cary Grant')&(cast['n']==2)&(cast['year']>=1940)&(cast['year']<1950)].sort_values(by='year')
cast[(cast['name']=='Cary Grant')&(cast['n']==1)&(cast['year']>=1940)&(cast['year']<1950)].sort_values(by='year')
cast[(cast['type']=='actor')&(cast['year']>=1950)&(cast['year']<1960)].shape[0]
cast[(cast['type']=='actress')&(cast['year']>=1950)&(cast['year']<1960)].shape[0]
cast[(cast['n']==1)&(cast['year']<=1980)].shape[0]
cast[(cast['n']!=1)&(cast['year']<=1980)].shape[0]
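# isnull() flags roles that never received a numeric "n" rank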
sum(cast[(cast['year']<=1980)]['n'].isnull())
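# A sketch of an equivalent decade filter (assumes the same `titles`
# DataFrame is in scope): Series.between is inclusive on both ends.
titles[titles['year'].between(1950, 1959)].shape[0]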
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating the autoencoder
Step2: Pre-training
Step3: Fine-tuning
Step4: Evaluation
Step5: The figure shows the corrupted examples and their reconstructions. The top half of the figure shows the ones corrupted with multiplicative noise, the bottom half shows the ones corrupted with additive noise. We can see that the autoencoders can provide decent reconstructions despite the heavy noise.
Step6: Now, we can use the autoencoder to initialize a supervised neural network. The network will have all the layers of the autoencoder up to (and including) the middle layer. We'll also add a softmax output layer. So, the network will look like:
Step7: Next, we'll evaluate the accuracy on the test set
Step8: Convolutional Autoencoders
Step9: Now we'll pre-train the autoencoder
Step10: And then convert the autoencoder to a regular neural network for classification
Step11: And evaluate it on the test set
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
%matplotlib inline
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
from scipy.io import loadmat
from shogun import RealFeatures, MulticlassLabels, Math
# load the dataset
dataset = loadmat(os.path.join(SHOGUN_DATA_DIR, 'multiclass/usps.mat'))
Xall = dataset['data']
# the usps dataset has the digits labeled from 1 to 10
# we'll subtract 1 to make them in the 0-9 range instead
Yall = np.array(dataset['label'].squeeze(), dtype=np.double)-1
# 4000 examples for training
Xtrain = RealFeatures(Xall[:,0:4000])
Ytrain = MulticlassLabels(Yall[0:4000])
# the rest for testing
Xtest = RealFeatures(Xall[:,4000:-1])
Ytest = MulticlassLabels(Yall[4000:-1])
# initialize the random number generator with a fixed seed, for repeatability
Math.init_random(10)
from shogun import NeuralLayers, DeepAutoencoder
layers = NeuralLayers()
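# symmetric architecture: the 256-d input is compressed to a 128-d code
# through a 512-unit layer, then expanded back to a 256-d reconstruction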
layers = layers.input(256).rectified_linear(512).rectified_linear(128).rectified_linear(512).linear(256).done()
ae = DeepAutoencoder(layers)
from shogun import AENT_DROPOUT, NNOM_GRADIENT_DESCENT
ae.pt_noise_type.set_const(AENT_DROPOUT) # use dropout noise
ae.pt_noise_parameter.set_const(0.5) # each input has a 50% chance of being set to zero
ae.pt_optimization_method.set_const(NNOM_GRADIENT_DESCENT) # train using gradient descent
ae.pt_gd_learning_rate.set_const(0.01)
ae.pt_gd_mini_batch_size.set_const(128)
ae.pt_max_num_epochs.set_const(50)
ae.pt_epsilon.set_const(0.0) # disable automatic convergence testing
# uncomment this line to allow the training progress to be printed on the console
#from shogun import MSG_INFO; ae.io.set_loglevel(MSG_INFO)
# start pre-training. this might take some time
ae.pre_train(Xtrain)
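# fine-tuning: train all layers jointly to minimize reconstruction error,
# again corrupting the inputs with dropout noise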
ae.set_noise_type(AENT_DROPOUT) # same noise type we used for pre-training
ae.set_noise_parameter(0.5)
ae.set_max_num_epochs(50)
ae.set_optimization_method(NNOM_GRADIENT_DESCENT)
ae.set_gd_mini_batch_size(128)
ae.set_gd_learning_rate(0.0001)
ae.set_epsilon(0.0)
# start fine-tuning. this might take some time
_ = ae.train(Xtrain)
# get a 50-example subset of the test set
subset = Xtest[:,0:50].copy()
# corrupt the first 25 examples with multiplicative noise
subset[:,0:25] *= (random.random((256,25))>0.5)
# corrupt the other 25 examples with additive noise
subset[:,25:50] += random.random((256,25))
# obtain the reconstructions
reconstructed_subset = ae.reconstruct(RealFeatures(subset))
# plot the corrupted data and the reconstructions
figure(figsize=(10,10))
for i in range(50):
ax1=subplot(10,10,i*2+1)
ax1.imshow(subset[:,i].reshape((16,16)), interpolation='nearest', cmap = cm.Greys_r)
ax1.set_xticks([])
ax1.set_yticks([])
ax2=subplot(10,10,i*2+2)
ax2.imshow(reconstructed_subset[:,i].reshape((16,16)), interpolation='nearest', cmap = cm.Greys_r)
ax2.set_xticks([])
ax2.set_yticks([])
# obtain the weights matrix of the first hidden layer
# the 512 is the number of biases in the layer (512 neurons)
# the transpose is because numpy stores matrices in row-major format, and Shogun stores
# them in column major format
w1 = ae.get_layer_parameters(1)[512:].reshape(256,512).T
# visualize the weights between the first 100 neurons in the hidden layer
# and the neurons in the input layer
figure(figsize=(10,10))
for i in range(100):
ax1=subplot(10,10,i+1)
ax1.imshow(w1[i,:].reshape((16,16)), interpolation='nearest', cmap = cm.Greys_r)
ax1.set_xticks([])
ax1.set_yticks([])
from shogun import NeuralSoftmaxLayer
nn = ae.convert_to_neural_network(NeuralSoftmaxLayer(10))
nn.set_max_num_epochs(50)
nn.set_labels(Ytrain)
_ = nn.train(Xtrain)
from shogun import MulticlassAccuracy
predictions = nn.apply_multiclass(Xtest)
accuracy = MulticlassAccuracy().evaluate(predictions, Ytest) * 100
print "Classification accuracy on the test set =", accuracy, "%"
from shogun import DynamicObjectArray, NeuralInputLayer, NeuralConvolutionalLayer, CMAF_RECTIFIED_LINEAR
conv_layers = DynamicObjectArray()
# 16x16 single channel images
conv_layers.append_element(NeuralInputLayer(16,16,1))
# the first encoding layer: 5 feature maps, filters with radius 2 (5x5 filters)
# and max-pooling in a 2x2 region: its output will be 5 8x8 feature maps
conv_layers.append_element(NeuralConvolutionalLayer(CMAF_RECTIFIED_LINEAR, 5, 2, 2, 2, 2))
# the second encoding layer: 15 feature maps, filters with radius 2 (5x5 filters)
# and max-pooling in a 2x2 region: its output will be 15 4x4 feature maps
conv_layers.append_element(NeuralConvolutionalLayer(CMAF_RECTIFIED_LINEAR, 15, 2, 2, 2, 2))
# the first decoding layer: same structure as the first encoding layer
conv_layers.append_element(NeuralConvolutionalLayer(CMAF_RECTIFIED_LINEAR, 5, 2, 2))
# the second decoding layer: same structure as the input layer
conv_layers.append_element(NeuralConvolutionalLayer(CMAF_RECTIFIED_LINEAR, 1, 2, 2))
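# spatial arithmetic check: each 2x2 max-pooling halves the map size,
# so 16x16 -> 8x8 after the first encoding layer, and 8x8 -> 4x4 after the second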
conv_ae = DeepAutoencoder(conv_layers)
conv_ae.pt_noise_type.set_const(AENT_DROPOUT) # use dropout noise
conv_ae.pt_noise_parameter.set_const(0.3) # each input has a 30% chance of being set to zero
conv_ae.pt_optimization_method.set_const(NNOM_GRADIENT_DESCENT) # train using gradient descent
conv_ae.pt_gd_learning_rate.set_const(0.002)
conv_ae.pt_gd_mini_batch_size.set_const(100)
conv_ae.pt_max_num_epochs[0] = 30 # max number of epochs for pre-training the first encoding layer
conv_ae.pt_max_num_epochs[1] = 10 # max number of epochs for pre-training the second encoding layer
conv_ae.pt_epsilon.set_const(0.0) # disable automatic convergence testing
# start pre-training. this might take some time
conv_ae.pre_train(Xtrain)
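# keep the encoding layers of the convolutional autoencoder and attach
# a 10-way softmax output for classification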
conv_nn = conv_ae.convert_to_neural_network(NeuralSoftmaxLayer(10))
# train the network
conv_nn.set_epsilon(0.0)
conv_nn.set_max_num_epochs(50)
conv_nn.set_labels(Ytrain)
# start training. this might take some time
_ = conv_nn.train(Xtrain)
predictions = conv_nn.apply_multiclass(Xtest)
accuracy = MulticlassAccuracy().evaluate(predictions, Ytest) * 100
print "Classification accuracy on the test set =", accuracy, "%"
<END_TASK>
|