text_prompt
stringlengths 168
30.3k
| code_prompt
stringlengths 67
124k
|
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The learner doesn't do any heavy lifting itself, it manages the creation of a sub-graph
Step2: Fitting the learner puts three copies of the OLS estimator in the path
Step3: The main estimator, fitted on all data, gets stored into the
Step4: In the above loop, a sub-segment of P is updated by each sublearner
Step5: ML-Ensemble follows the Scikit-learn API, so if you wish to update any
Step6: <div class="alert alert-info"><h4>Note</h4><p>Updating the indexer on one learner updates the indexer on all</p></div>
Step7: Each sub-learner records fit and predict times during fitting, and if
Step8: Preprocessing
Step9: To build the learner we pass the name of the transformer as
Step10: We now repeat the above process to fit the learner, starting with fitting
Step11: Note that the cache now contains the transformers as well
Step12: And estimation data is collected on a partition basis
Step13: Parallel estimation
Step14: For a slightly more high-level API for parallel computation on a single
|
<ASSISTANT_TASK:>
Python Code:
from mlens.utils.dummy import OLS
from mlens.parallel import Learner, Job
from mlens.index import FoldIndex
indexer = FoldIndex(folds=2)
learner = Learner(estimator=OLS(),
indexer=indexer,
name='ols')
import os, tempfile
import numpy as np
X = np.arange(20).reshape(10, 2)
y = np.random.rand(10)
# Specify a cache directory
path = []
# Run the setup routine
learner.setup(X, y, 'fit')
# Run
for sub_learner in learner.gen_fit(X, y):
sub_learner.fit(path)
print("Cached items:\n%r" % path)
learner.collect(path)
path = []
P = np.zeros((y.shape[0], 2))
learner.setup(X, y, 'transform', n_left_concats=1)
for sub_learner in learner.gen_transform(X, P):
sub_learner.transform(path)
print('Output:')
print(P)
print()
job = Job(
job='predict',
stack=False,
split=True,
dir={},
targets=y,
predict_in=X,
predict_out=np.zeros((y.shape[0], 1))
)
learner.setup(job.predict_in, job.targets, job.job)
for sub_learner in learner(job.args(), 'main'):
sub_learner()
print('Output:')
print(job.predict_out)
print()
print("Params before:")
print(learner.get_params())
learner.set_params(estimator__offset=1, indexer__folds=3)
print("Params after:")
print(learner.get_params())
from mlens.index import SubsetIndex
def mse(y, p):
    """Mean squared error between targets y and predictions p."""
    squared_errors = (y - p) ** 2
    return np.mean(squared_errors)
indexer = SubsetIndex(partitions=2, folds=2, X=X)
learner = Learner(estimator=OLS(),
indexer=indexer,
name='subsemble-ols',
scorer=mse,
verbose=True)
job.job = 'fit'
job.predict_out = np.zeros((y.shape[0], 2))
learner.setup(job.predict_in, job.targets, job.job)
for sub_learner in learner(job.args(), 'main'):
sub_learner.fit()
print('Output:')
print(job.predict_out)
print()
learner.collect()
print("Data:\n%s" % learner.data)
from mlens.utils.dummy import Scale
from mlens.parallel import Transformer, Pipeline
pipeline = Pipeline([('trans', Scale())], return_y=True)
transformer = Transformer(estimator=pipeline,
indexer=indexer,
name='sc',
verbose=True)
learner = Learner(estimator=OLS(),
preprocess='sc',
indexer=indexer,
scorer=mse,
verbose=True)
# Reset the prediction output array
job.predict_out = np.zeros((y.shape[0], 2))
transformer.setup(job.predict_in, job.targets, job.job)
learner.setup(job.predict_in, job.targets, job.job)
# Turn split off when you don't want the args() call to spawn a new sub-cache
job.split = False
for subtransformer in transformer(job.args(), 'auxiliary'):
subtransformer()
for sublearner in learner(job.args(), 'main'):
sublearner()
transformer.collect()
learner.collect()
print("Cache:")
for item in job.dir['task_%i' % job._n_dir]:
print('{:20}{}'.format(*item))
print("Data:\n%s" % learner.data)
from multiprocessing.dummy import Pool
def run(est): est()
args = job.args()
job.predict_out = np.zeros((y.shape[0], 2))
job.job = 'predict'
Pool(4).map(run, list(learner(args, 'main')))
from mlens.parallel import run
print(
run(transformer, 'predict', X)
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: First write a number_to_words(n) function that takes an integer n between 1 and 1000 inclusive and returns a list of words for the number as described above
Step3: Now write a set of assert tests for your number_to_words function that verifies that it is working as expected.
Step5: Now define a count_letters(n) that returns the number of letters used to write out the words for all of the numbers 1 to n inclusive.
Step6: Now write a set of assert tests for your count_letters function that verifies that it is working as expected.
Step7: Finally used your count_letters function to solve the original question.
|
<ASSISTANT_TASK:>
Python Code:
# Lookup table mapping key integers to their (lowercase, unspaced) word forms.
# Used by number_to_words below.
number={}
number[0]="zero"
number[1]="one"
number[2]="two"
number[3]="three"
number[4]='four'
number[5]='five'
number[6]='six'
number[7]='seven'
number[8]='eight'
number[9]='nine'
number[10]='ten'
number[11]='eleven'
number[12]='twelve'
number[13]='thirteen'
number[14]='fourteen'
number[15]='fifteen'
# NOTE(review): slot 16 holds the suffix 'teen'; 16-19 are assembled as
# number[n-10] + number[16] (e.g. 'six' + 'teen').
number[16]='teen'
number[20]='twenty'
number[30]='thirty'
number[40]='forty'
number[50]='fifty'
number[60]='sixty'
number[70]='seventy'
number[80]='eighty'
number[90]='ninety'
# NOTE(review): slot 17 is repurposed to hold the connective 'and' used
# between hundreds and the remainder; 17 itself is built as 'seven'+'teen'.
number[17]='and'
number[100]='hundred'
number[1000]='thousand'
# NOTE(review): this module-level 'num' list appears unused — confirm.
num=[]
def number_to_words(n):
    """Return the British-English words for 1 <= n <= 1000, concatenated without spaces.

    Examples: 342 -> 'threehundredandfortytwo', 115 -> 'onehundredandfifteen'.

    Fixes in this version:
    - the docstring line was an unquoted bare statement (syntax error);
    - exact tens (20, 30, ...) no longer get a spurious 'zero' appended;
    - exact hundreds (100, 200, ...) no longer become '...hundredandzero';
    - 1000 is now handled ('onethousand');
    - the nine copy-pasted hundreds branches are collapsed into one.
    """
    ones = ['', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',
            'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen',
            'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen',
            'nineteen']
    tens = ['', '', 'twenty', 'thirty', 'forty', 'fifty', 'sixty',
            'seventy', 'eighty', 'ninety']
    if n == 1000:
        return 'onethousand'
    if n >= 100:
        remainder = n % 100
        words = ones[n // 100] + 'hundred'
        # British usage inserts 'and' between the hundreds and the remainder.
        return words + ('and' + number_to_words(remainder) if remainder else '')
    if n >= 20:
        return tens[n // 10] + ones[n % 10]
    return ones[n]
# number
# len(number[100])
number_to_words(767)
# YOUR CODE HERE
assert number_to_words(342)=='threehundredandfortytwo'
assert number_to_words(115)=='onehundredandfifteen'
assert True # use this for grading the number_to_words tests.
def count_letters(n):
    """Return the total number of letters used to write out the words for 1..n inclusive.

    Relies on number_to_words returning words with no spaces or hyphens,
    so len() counts letters directly. The original had an unquoted
    docstring line (a syntax error) and a manual countdown loop.
    """
    return sum(len(number_to_words(i)) for i in range(1, n + 1))
count_letters(115)
# YOUR CODE HERE
assert count_letters(5)==19
assert count_letters(10)==39
assert True # use this for grading the count_letters tests.
# YOUR CODE HERE
count_letters(999)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Purpose
Step2: <img src='https
Step3: python
Step4: Renderers can also have different modes. In this case we will instantiate the renderer in 'server' mode, which tells the Renderer to render the HoloViews object to a format that can easily be deployed as a server app. Before going into more detail about deploying server apps we will quickly remind ourselves how the renderer turns HoloViews objects into Bokeh models.
Step5: <LayoutPlot LayoutPlot01811>
Step6: Column(id='1570', ...)
Step7: Bokeh Documents
Step8: (<bokeh.document.Document at 0x11afc7590>,
Step9: In the background however, HoloViews uses the Panel library to render components to a Bokeh model which can be rendered in the notebook, to a file or on a server
Step10: For more information on the interaction between Panel and HoloViews see the the Panel documentation.
Step11: In addition to starting a server from a script we can also start up a server interactively, so let's do a quick deep dive into Bokeh Application and Server objects and how we can work with them from within HoloViews.
Step12: <bokeh.server.server.Server object at 0x10b3a0510>
Step13: After running the cell above you should have noticed a new browser window popping up displaying our plot. Once you are done playing with it you can stop it with
Step14: We can achieve the equivalent using the .show method on a Panel object
Step15: <img width='80%' src="https
Step16: Inlining apps in the notebook
Step17: <img width='80%' src='https
Step18: <img width='80%' src='https
Step19: Once started we can stop and start it at will using the .stop and .start methods
Step20: Combining Bokeh Application and Flask Application
Step21: We run load up our dynamic map into a Bokeh Application with the parameter allow_websocket_origin=["localhost
Step22: If instead we want to deploy this we could add .servable as discussed before or use pn.serve. Note however that when using pn.serve all sessions will share the same state therefore it is best to
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import holoviews as hv
hv.extension('bokeh')
# Declare some points
points = hv.Points(np.random.randn(1000,2 ))
# Declare points as source of selection stream
selection = hv.streams.Selection1D(source=points)
# Write function that uses the selection indices to slice points and compute stats
def selected_info(index):
    """Selection1D callback: return the selected points relabelled with their mean.

    index : list of selected row indices supplied by the stream. With an
    empty selection the label falls back to 'No selection'.
    """
    arr = points.array()[index]
    if index:
        # Mean over rows gives (mean_x, mean_y) for the selected subset.
        label = 'Mean x, y: %.3f, %.3f' % tuple(arr.mean(axis=0))
    else:
        label = 'No selection'
    return points.clone(arr, label=label).opts(color='red')
# Combine points and DynamicMap
selected_points = hv.DynamicMap(selected_info, streams=[selection])
layout = points.opts(tools=['box_select', 'lasso_select']) + selected_points
layout
renderer = hv.renderer('bokeh')
print(renderer)
renderer = renderer.instance(mode='server')
hvplot = renderer.get_plot(layout)
print(hvplot)
hvplot.state
html = renderer._figure_data(hvplot, 'html')
renderer(layout)
doc = renderer.server_doc(layout)
doc.title = 'HoloViews App'
import panel as pn
model = pn.panel(layout).get_root()
model
hv.renderer('bokeh').server_doc(layout)
def sine(frequency, phase, amplitude):
    """Return a HoloViews Curve of amplitude*sin(frequency*x + phase) over [0, 4*pi]."""
    xs = np.linspace(0, np.pi*4)
    return hv.Curve((xs, np.sin(frequency*xs+phase)*amplitude)).opts(width=800)
ranges = dict(frequency=(1, 5), phase=(-np.pi, np.pi), amplitude=(-2, 2), y=(-2, 2))
dmap = hv.DynamicMap(sine, kdims=['frequency', 'phase', 'amplitude']).redim.range(**ranges)
server = pn.serve(dmap, start=False, show=False)
server.start()
server.show('/')
# Outside the notebook ioloop needs to be started
# from tornado.ioloop import IOLoop
# loop = IOLoop.current()
# loop.start()
server.stop()
server = pn.panel(dmap).show()
server.stop()
pn.panel(dmap).app('localhost:8888')
def sine(counter):
    """Return a Curve of sin(x + phase), where the phase advances with the counter.

    NOTE(review): by operator precedence the phase is ((counter*0.1) % pi) * 2,
    not (counter*0.1) % (2*pi) — confirm this wrap-around is intended.
    """
    phase = counter*0.1%np.pi*2
    xs = np.linspace(0, np.pi*4)
    return hv.Curve((xs, np.sin(xs+phase))).opts(width=800)
counter = hv.streams.Counter()
dmap = hv.DynamicMap(sine, streams=[counter])
dmap_pane = pn.panel(dmap)
dmap_pane.app('localhost:8891')
def update():
    """Periodic callback: advance the Counter stream by one to animate the plot."""
    counter.event(counter=counter.counter+1)
cb = dmap_pane.add_periodic_callback(update, period=200)
cb.stop()
def sine(frequency, phase, amplitude):
    """Return a Curve of amplitude*sin(frequency*x + phase) over [0, 4*pi].

    Same as the earlier sine() but uses the older .options() spelling.
    """
    xs = np.linspace(0, np.pi*4)
    return hv.Curve((xs, np.sin(frequency*xs+phase)*amplitude)).options(width=800)
ranges = dict(frequency=(1, 5), phase=(-np.pi, np.pi), amplitude=(-2, 2), y=(-2, 2))
dmap = hv.DynamicMap(sine, kdims=['frequency', 'phase', 'amplitude']).redim.range(**ranges)
pn.serve(dmap, websocket_origin='localhost:5000', port=5006, show=False)
import holoviews as hv
import numpy as np
import panel as pn
# Create the holoviews app again
def sine(phase):
    """Return a Curve of sin(x + phase) over [0, 4*pi]; driven by the Phase stream."""
    xs = np.linspace(0, np.pi*4)
    return hv.Curve((xs, np.sin(xs+phase))).opts(width=800)
stream = hv.streams.Stream.define('Phase', phase=0.)()
dmap = hv.DynamicMap(sine, streams=[stream])
start, end = 0, np.pi*2
slider = pn.widgets.FloatSlider(start=start, end=end, value=start, step=0.2, name="Phase")
# Create a slider and play buttons
def animate_update():
    """Advance the phase slider by one step, wrapping back to start past the end."""
    # 'year' is a leftover name from the template this was adapted from;
    # it actually holds the next phase value.
    year = slider.value + 0.2
    if year > end:
        year = start
    slider.value = year
def slider_update(event):
    """Panel watcher: push the new slider value into the HoloViews Phase stream."""
    # Notify the HoloViews stream of the slider update
    stream.event(phase=event.new)
slider.param.watch(slider_update, 'value')
def animate(event):
    """Toggle the play/pause button label and start/stop the periodic callback."""
    if button.name == '► Play':
        button.name = '❚❚ Pause'
        callback.start()
    else:
        button.name = '► Play'
        callback.stop()
button = pn.widgets.Button(name='► Play', width=60, align='end')
button.on_click(animate)
callback = button.add_periodic_callback(animate_update, 50, start=False)
app = pn.Column(
dmap,
pn.Row(slider, button)
)
app
import numpy as np
import holoviews as hv
from bokeh.io import show, curdoc
from bokeh.layouts import layout
from bokeh.models import Slider, Button
renderer = hv.renderer('bokeh').instance(mode='server')
# Create the holoviews app again
def sine(phase):
    """Return a Curve of sin(x + phase) over [0, 4*pi] for the Bokeh-server example."""
    xs = np.linspace(0, np.pi*4)
    return hv.Curve((xs, np.sin(xs+phase))).opts(width=800)
stream = hv.streams.Stream.define('Phase', phase=0.)()
dmap = hv.DynamicMap(sine, streams=[stream])
# Define valid function for FunctionHandler
# when deploying as script, simply attach to curdoc
def modify_doc(doc):
    """Bokeh FunctionHandler entry point: build the animated sine app on *doc*.

    Attaches the HoloViews plot plus a phase slider and a play/pause button,
    then returns the document for use with show()/curdoc().
    """
    # Create HoloViews plot and attach the document
    hvplot = renderer.get_plot(dmap, doc)
    # Create a slider and play buttons
    def animate_update():
        # Advance the slider one step, wrapping past the end.
        year = slider.value + 0.2
        if year > end:
            year = start
        slider.value = year
    def slider_update(attrname, old, new):
        # Notify the HoloViews stream of the slider update
        stream.event(phase=new)
    start, end = 0, np.pi*2
    slider = Slider(start=start, end=end, value=start, step=0.2, title="Phase")
    slider.on_change('value', slider_update)
    callback_id = None
    def animate():
        # NOTE(review): 'global' bypasses the enclosing callback_id (which
        # stays None); 'nonlocal' was likely intended — it happens to work
        # because both branches go through the module-level name. Confirm.
        global callback_id
        if button.label == '► Play':
            button.label = '❚❚ Pause'
            # Drive animate_update every 50 ms while playing.
            callback_id = doc.add_periodic_callback(animate_update, 50)
        else:
            button.label = '► Play'
            doc.remove_periodic_callback(callback_id)
    button = Button(label='► Play', width=60)
    button.on_click(animate)
    # Combine the holoviews plot and widgets in a layout
    plot = layout([
        [hvplot.state],
        [slider, button]], sizing_mode='fixed')
    doc.add_root(plot)
    return doc
# To display in the notebook
show(modify_doc, notebook_url='localhost:8888')
# To display in a script
# doc = modify_doc(curdoc())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step4: Word counting
Step7: Write a function count_words that takes a list of words and returns a dictionary where the keys in the dictionary are the unique words in the list and the values are the word counts.
Step10: Write a function sort_word_counts that returns a list of sorted word counts
Step11: Perform a word count analysis on Chapter 1 of Moby Dick, whose text can be found in the file mobydick_chapter1.txt
Step12: Create a "Cleveland Style" dotplot of the counts of the top 50 words using Matplotlib. If you don't know what a dotplot is, you will have to do some research...
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
wasteland =
APRIL is the cruellest month, breeding
Lilacs out of the dead land, mixing
Memory and desire, stirring
Dull roots with spring rain.
def tokenize(s, stop_words=None, punctuation='`~!@#$%^&*()_-+={[}]|\:;"<,>.?/}\t'):
    """Split *s* into a list of lowercase words, removing punctuation and stop words.

    Parameters
    ----------
    s : str
        The text to tokenize.
    stop_words : list of str, or a single whitespace-separated str, optional
        Words to drop from the result.
    punctuation : str
        Characters replaced by whitespace before splitting.

    The original stub merely split on newlines and failed its own asserts.
    """
    # Accept stop words either as a list or as one space-separated string.
    if isinstance(stop_words, str):
        stop_words = stop_words.split()
    stop = set(stop_words or [])
    # Map every punctuation character to a space, then split on whitespace.
    table = str.maketrans(punctuation, ' ' * len(punctuation))
    words = s.lower().translate(table).split()
    return [w for w in words if w not in stop]
assert tokenize("This, is the way; that things will end", stop_words=['the', 'is']) == \
['this', 'way', 'that', 'things', 'will', 'end']
wasteland =
APRIL is the cruellest month, breeding
Lilacs out of the dead land, mixing
Memory and desire, stirring
Dull roots with spring rain.
assert tokenize(wasteland, stop_words='is the of and') == \
['april','cruellest','month','breeding','lilacs','out','dead','land',
'mixing','memory','desire','stirring','dull','roots','with','spring',
'rain']
def count_words(data):
    """Return a word-count dictionary from the list of words in *data*.

    Keys are the unique words, values are their occurrence counts.
    Replaces the NotImplementedError stub (whose pseudo-code notes were
    bare unquoted statements, i.e. syntax errors).
    """
    counts = {}
    for word in data:
        # First occurrence initializes to 1; later occurrences increment.
        counts[word] = counts.get(word, 0) + 1
    return counts
assert count_words(tokenize('this and the this from and a a a')) == \
{'a': 3, 'and': 2, 'from': 1, 'the': 1, 'this': 2}
def sort_word_counts(wc):
    """Return a list of (word, count) 2-tuples from dict *wc*, sorted by count descending.

    Uses the built-in Timsort instead of the hand-rolled bubble sort the
    stub's pseudo-code sketched; ties keep dict insertion order (stable sort).
    """
    return sorted(wc.items(), key=lambda item: item[1], reverse=True)
assert sort_word_counts(count_words(tokenize('this and a the this this and a a a'))) == \
[('a', 4), ('this', 3), ('and', 2), ('the', 1)]
# YOUR CODE HERE
raise NotImplementedError()
assert swc[0]==('i',43)
assert len(swc)==848
# YOUR CODE HERE
raise NotImplementedError()
assert True # use this for grading the dotplot
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Multipliziert Integers oder Floats mit 2
Step2: 1.Schreibe eine Funktion, die aus einer Liste, die grösste Zahl herauszieht. Es ist verboten mit "max" zu arbeiten.
Step3: 2.Schreibe eine Funktion, die alle Elemente einer Liste, addiert. Es ist verboten mit "sum" zu arbeiten.
Step4: 3.Schreibe eine Funktion, die alle Elemente einer Liste multipliziert.
Step5: 4.Schreibe eine Funktion, die einen String nimmt, und spiegelt. Also "hallo" zu "ollah".
Step6: 5.Schreibe eine Funktion, die prüft, ob eine Zahl in einer bestimmten Zahlenfolge zu finden ist.
Step7: 6.Lösche die mehrfach genannten Elemente aus der folgenden Liste.
Step8: 7.Drucke die geraden Zahlen aus der folgenden Liste aus
Step9: 8.Prüfe mit einer Funktionen, wieviele Grossbuchstaben in folgendem Satz zu finden sind.
|
<ASSISTANT_TASK:>
Python Code:
def test(element):
element = element * 2
return element
test(5)
lst = [3,7,14,222,6]
lst.reverse()
print(lst)
def maxi(element):
    """Return the largest item of a non-empty list without using max().

    The original sorted and reversed the list in place, mutating the
    caller's data; this linear scan leaves the input untouched.
    """
    largest = element[0]
    for item in element[1:]:
        if item > largest:
            largest = item
    return largest
maxi(lst)
def summe(mylist):
    """Add up all elements of *mylist* without using sum()."""
    total = 0
    for value in mylist:
        total += value
    return total
summe(lst)
def multi(Menge):
    """Multiply all elements of *Menge* together."""
    product = 1
    for value in Menge:
        product *= value
    return product
multi(lst)
elem = input('Bitte geben Sie ein Wort ein ')
wort = list(elem)
print(wort)
def umkehr(wort):
    """Prompt the user for a word and print its letters in reversed order.

    NOTE(review): the *wort* parameter is immediately overwritten by the
    input() result, so the argument passed in is ignored — confirm intent.
    """
    elem = input('Bitte geben Sie ein Wort ein ')
    wort = list(elem)
    wort.reverse()
    print(wort)
umkehr(wort)
def mirror(mylist):
    """Return a reversed copy of *mylist* (also correct for the empty sequence).

    The original looped and returned the slice on the very first iteration,
    which meant an empty input fell through and returned None.
    """
    return mylist[::-1]
liste = [45, 34, 64,45]
def such(zahlen):
    """Ask the user for a number and report whether it occurs in *zahlen*.

    Returns 'Treffer' (hit) if the entered number is in the sequence,
    otherwise 'Kein Treffer' (no hit). Raises ValueError on non-integer input.
    """
    zahl = int(input('Bitte geben Sie eine Zahl ein:'))
    if zahl in zahlen:
        return 'Treffer'
    else:
        return 'Kein Treffer'
such(liste)
def isinlist(zahl, liste):
    """Return True if *zahl* occurs in *liste*, else False.

    The original iterated the module-level ``lst`` instead of the parameter,
    compared against an undefined ``number`` and returned the undefined
    lowercase names ``true``/``false`` (NameError at runtime).
    """
    for elem in liste:
        if elem == zahl:
            return True
    return False
istinlist(34, liste)
isinlist(34, liste)
zahleliste = []
liste = [5,5,5,5,3,2,11,5]
def lösch(mehrfach):
    """Print *mehrfach* with duplicates removed, keeping first-seen order.

    The original appended an element only when it was ALREADY in the
    (initially empty) result list, so it always printed [] — the
    membership test is inverted here to actually deduplicate.
    """
    new_mehrfach = []
    for elem in mehrfach:
        if elem not in new_mehrfach:
            new_mehrfach.append(elem)
    print(new_mehrfach)
lösch(liste)
lst = [34,23,22,443,45,78,23,89,23]
def gerade(summe):
    """Print every even number contained in the sequence *summe*."""
    for zahl in summe:
        if zahl % 2 == 0:
            print(zahl)
gerade(lst)
satz = "In Oesterreich zeichnet sich ein Rechtsrutsch ab. OeVP und FPOe haben stark zugelegt. Gemaess der neusten Hochrechnung ist die Partei von Sebastian Kurz mit 31,6 Prozent der Stimmen Wahlsiegerin, auf Platz zwei folgt die SPÖ (26,9 Prozent) vor der FPOe (26,0 Prozent)."
def counting_caps(satz):
    """Return the number of uppercase letters in the string *satz*.

    Replaces the original fill-in-the-blanks template (XXXXX placeholder
    names), which was not runnable Python.
    """
    caps = 0
    for zeichen in satz:
        if zeichen.isupper():
            caps += 1
    return caps
counting_caps(satz)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2:
Step3:
Step4:
Step5:
Step6: Exercise 2
Step7: Step 2b
Step8: Step 3
Step9: Notice that the sigmoid is never less than zero or greater than 1.
Step10: Keep your eye on the orange curve. This is for the case when the actual value of a row in the dataset is 0 (the banknote is a fake). If the banknote is a fake and say $\hat{y}$ is 7, then $sigmoid(\hat{y})$ is going to be close to 1, say 0.9. This means that the penalty is going to be very high because the orange curve increases rapidly in value as it approaches 1.
Step 5
Step11: Step 6
|
<ASSISTANT_TASK:>
Python Code:
# Import our usual libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
# OS-independent way to navigate the file system
# Data directory is one directory up in relation to directory of this notebook
data_dir_root = os.path.normpath(os.getcwd() + os.sep + os.pardir + os.sep + "Data")
# Where the file is
file_url = data_dir_root + os.sep + "forged-bank-notes.csv"
#file_url
# header=0 drops the header row in the csv file
data = pd.read_csv(file_url, header=0, names=['V1', 'V2', 'V3', 'V4', 'Genuine'])
# Number of rows and columns in the data
data.shape
# First few rows of the datastet
data.head()
# Scatter of V1 versus V2
positive = data[data['Genuine'].isin([1])]
negative = data[data['Genuine'].isin([0])]
fig, ax = plt.subplots(figsize=(12,8))
ax.scatter(positive['V1'], positive['V2'], s=30, c='b', marker='.', label='Genuine')
ax.scatter(negative['V1'], negative['V2'], s=30, c='r', marker='.', label='Forged')
ax.legend(loc='lower right')
ax.set_xlabel('V1')
ax.set_ylabel('V2')
plt.title('Bank Note Validation Based on Feature Values 1 and 2');
# Scatter of V3 versus V4
positive = data[data['Genuine'].isin([1])]
negative = data[data['Genuine'].isin([0])]
fig, ax = plt.subplots(figsize=(12,8))
ax.scatter(positive['V3'], positive['V4'], s=30, c='b', marker='+', label='Genuine')
ax.scatter(negative['V3'], negative['V4'], s=30, c='r', marker='s', label='Forged')
ax.legend(loc='lower right')
ax.set_xlabel('V3')
ax.set_ylabel('V4')
plt.title('Bank Note Validation Based on Feature Values V3 and V4');
# Scatter of V1 versus V4
positive = data[data['Genuine'].isin([1])]
negative = data[data['Genuine'].isin([0])]
fig, ax = plt.subplots(figsize=(12,8))
ax.scatter(positive['V1'], positive['V4'], s=30, c='b', marker='+', label='Genuine')
ax.scatter(negative['V1'], negative['V4'], s=30, c='r', marker='s', label='Forged')
ax.legend(loc='lower right')
ax.set_xlabel('V1')
ax.set_ylabel('V4')
plt.title('Bank Note Validation Based on Feature Values 1 and 4');
# Scatter of V2 versus V3
positive = data[data['Genuine'].isin([1])]
negative = data[data['Genuine'].isin([0])]
fig, ax = plt.subplots(figsize=(12,8))
ax.scatter(positive['V2'], positive['V3'], s=30, c='b', marker='+', label='Genuine')
ax.scatter(negative['V2'], negative['V3'], s=30, c='r', marker='s', label='Forged')
ax.legend(loc='lower right')
ax.set_xlabel('V2')
ax.set_ylabel('V3')
plt.title('Bank Note Validation Based on Feature Values V2 and V3');
# Scatter of Skewness versus Entropy
positive = data[data['Genuine'].isin([1])]
negative = data[data['Genuine'].isin([0])]
fig, ax = plt.subplots(figsize=(12,8))
ax.scatter(positive['V2'], positive['V4'], s=30, c='b', marker='+', label='Genuine')
ax.scatter(negative['V2'], negative['V4'], s=30, c='r', marker='s', label='Forged')
ax.legend(loc='lower right')
ax.set_xlabel('V2')
ax.set_ylabel('V4')
plt.title('Bank Note Validation Based on Feature Values V2 and V4');
# First few rows of the input
inputs = data[['V1', 'V2']]
inputs.head()
# First few rows of the output/target
output = data[['Genuine']]
output.head()
# Define the sigmoid function or transformation
# NOTE: ALSO PUT INTO THE SharedFunctions notebook
def sigmoid(z):
    """Logistic function: squash a scalar or array *z* into the open interval (0, 1)."""
    denominator = 1 + np.exp(-z)
    return 1 / denominator
# Plot the sigmoid function
# Generate the values to be plotted
x_vals = np.linspace(-10,10,1000)
y_vals = [sigmoid(x) for x in x_vals]
# Plot the values
fig, ax = plt.subplots(figsize=(12,6))
ax.plot(x_vals, y_vals, 'blue')
ax.grid()
# Draw some constant lines to aid visualization
plt.axvline(x=0, color='black')
plt.axhline(y=0.5, color='black')
plt.yticks(np.arange(0,1.1,0.1))
plt.xticks(np.arange(-10,11,1))
plt.xlabel(r'$\hat{y}$', fontsize=15)
plt.ylabel(r'$sigmoid(\hat{y})$', fontsize=15)
plt.title('The Sigmoid Transformation', fontsize=15)
ax.plot;
# Visualize the penalty function when y = 1 and y = 0
x_vals = np.linspace(0,1,100)
y_1_vals = -np.log(x_vals)
y_0_vals = -np.log(1 - x_vals)
fig, ax = plt.subplots(figsize=(12,6))
ax.grid()
ax.plot(x_vals, y_1_vals, color='blue', linestyle='solid', label='actual value of y = 1')
ax.plot(x_vals, y_0_vals, color='orange', linestyle='solid', label='actual value of y = 0')
plt.legend(loc='upper center')
plt.xlabel(r'$sigmoid(\hat{y})$', fontsize=15)
plt.ylabel('Penalty', fontsize=15)
ax.plot;
# Set up the training data
X_train = inputs.values
#X_train.shape
# Set up the target data
y = output.values
# Change the shape of y to suit scikit learn's requirements
y_train = np.array(list(y.squeeze()))
#y_train.shape
# Set up the logistic regression model from SciKit Learn
from sklearn.linear_model import LogisticRegression
# Solvers that seem to work well are 'liblinear' and 'newton-cg"
lr = LogisticRegression(C=100.0, random_state=0, solver='liblinear', verbose=2)
# Train the model and find the optimal parameter values
lr.fit(X_train, y_train)
# These are the optimal values of w0, w1 and w2
w0 = lr.intercept_[0]
w1 = lr.coef_.squeeze()[0]
w2 = lr.coef_.squeeze()[1]
print("w0: {}\nw1: {}\nw2: {}".format(w0, w1, w2))
# Genuine or fake for the entire data set
y_pred = lr.predict(X_train)
print(y_pred)
# How do the predictions compare with the actual labels on the data set?
y_train == y_pred
# The probabilities of [Genuine = 0, Genuine = 1]
y_pred_probs = lr.predict_proba(X_train)
print(y_pred_probs)
# Where did the model misclassify banknotes?
errors = data[data['Genuine'] != y_pred]
#errors
# Following Sonya Sawtelle
# (https://sdsawtelle.github.io/blog/output/week3-andrew-ng-machine-learning-with-python.html)
# This is the classifier boundary line when z=0
x1 = np.linspace(-6,6,100) # Array of exam1 value
x2 = (-w0/w2) - (w1/w2)*x1 # Corresponding V2 values along the line z=0
# Following Sonya Sawtelle
# (https://sdsawtelle.github.io/blog/output/week3-andrew-ng-machine-learning-with-python.html)
# Scatter of V1 versus V2
positive = data[data['Genuine'].isin([1])]
negative = data[data['Genuine'].isin([0])]
fig, ax = plt.subplots(figsize=(15,10))
#colors = ["r", "b"]
#la = ["Forged", "Genuine"]
#markers = [colors[gen] for gen in data['Genuine']] # this is a cool way to color the categories!
#labels = [la[gen] for gen in data['Genuine']]
#ax.scatter(data['V1'], data['V2'], color=markers, s=10, label=labels)
ax.scatter(positive['V1'], positive['V2'], s=30, c='b', marker='.', label='Genuine')
ax.scatter(negative['V1'], negative['V2'], s=30, c='r', marker='.', label='Forged')
ax.set_xlabel('V1')
ax.set_ylabel('V2')
# Now plot black circles around data points that were incorrectly predicted
ax.scatter(errors["V1"], errors["V2"], facecolors="none", edgecolors="m", s=80, label="Wrongly Classified")
# Finally plot the line which represents the decision boundary
ax.plot(x1, x2, color="green", linestyle="--", marker=None, label="boundary")
ax.legend(loc='upper right')
plt.title('Bank Note Validation Based on Feature Values 1 and 2');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
# Import the ES-DOC notebook helper used to author CMIP6 model documentation.
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
# Output document for: MIP era 'cmip6', institute 'thu', model 'ciesm',
# realm/topic 'atmos'.  Every DOC.set_id / DOC.set_value call below writes
# into this single document instance.
DOC = NotebookOutput('cmip6', 'thu', 'ciesm', 'atmos')
# Document author(s).
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Document contributor(s).
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# ---------------------------------------------------------------------------
# CMIP6 atmosphere (atmos) realm documentation properties.
#
# The generated notebook repeated an identical boilerplate cell for every
# property:
#
#     # PROPERTY ID - DO NOT EDIT !
#     DOC.set_id('<property id>')
#     # PROPERTY VALUE:
#     # Set as follows: DOC.set_value("value")
#     # TODO - please enter value(s)
#
# No DOC.set_value(...) call was ever uncommented, so the only executable
# behaviour was the ordered sequence of DOC.set_id(...) calls.  That sequence
# is reproduced here, data-driven, from a single ordered tuple — eliminating
# roughly 1100 lines of duplicated boilerplate while keeping behaviour
# byte-identical (same ids, same call order).
#
# NOTE(review): selecting an id presumably primes DOC for a subsequent
# DOC.set_value(...) call — confirm against pyesdoc.  To document a property,
# call DOC.set_value(...) right after its id is selected.  The per-property
# "Valid Choices" lists live in the CMIP6 specialisations (and in the
# original generated notebook); consult them when entering values.
#
# The id strings are schema keys and are preserved verbatim, including
# upstream misspellings such as 'other_flourinated_gases' and
# 'physical_reprenstation' — do NOT "correct" them.
# ---------------------------------------------------------------------------

# Ordered CMIP6 atmos property identifiers (order preserved from the
# generated notebook).
_ATMOS_PROPERTY_IDS = (
    'cmip6.atmos.key_properties.overview.model_overview',
    'cmip6.atmos.key_properties.overview.model_name',
    'cmip6.atmos.key_properties.overview.model_family',
    'cmip6.atmos.key_properties.overview.basic_approximations',
    'cmip6.atmos.key_properties.resolution.horizontal_resolution_name',
    'cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution',
    'cmip6.atmos.key_properties.resolution.range_horizontal_resolution',
    'cmip6.atmos.key_properties.resolution.number_of_vertical_levels',
    'cmip6.atmos.key_properties.resolution.high_top',
    'cmip6.atmos.key_properties.timestepping.timestep_dynamics',
    'cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer',
    'cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer',
    'cmip6.atmos.key_properties.orography.type',
    'cmip6.atmos.key_properties.orography.changes',
    'cmip6.atmos.grid.discretisation.overview',
    'cmip6.atmos.grid.discretisation.horizontal.scheme_type',
    'cmip6.atmos.grid.discretisation.horizontal.scheme_method',
    'cmip6.atmos.grid.discretisation.horizontal.scheme_order',
    'cmip6.atmos.grid.discretisation.horizontal.horizontal_pole',
    'cmip6.atmos.grid.discretisation.horizontal.grid_type',
    'cmip6.atmos.grid.discretisation.vertical.coordinate_type',
    'cmip6.atmos.dynamical_core.overview',
    'cmip6.atmos.dynamical_core.name',
    'cmip6.atmos.dynamical_core.timestepping_type',
    'cmip6.atmos.dynamical_core.prognostic_variables',
    'cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition',
    'cmip6.atmos.dynamical_core.top_boundary.top_heat',
    'cmip6.atmos.dynamical_core.top_boundary.top_wind',
    'cmip6.atmos.dynamical_core.lateral_boundary.condition',
    'cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name',
    'cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method',
    'cmip6.atmos.dynamical_core.advection_tracers.scheme_name',
    'cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics',
    'cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities',
    'cmip6.atmos.dynamical_core.advection_tracers.conservation_method',
    'cmip6.atmos.dynamical_core.advection_momentum.scheme_name',
    'cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics',
    'cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type',
    'cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities',
    'cmip6.atmos.dynamical_core.advection_momentum.conservation_method',
    'cmip6.atmos.radiation.aerosols',
    'cmip6.atmos.radiation.shortwave_radiation.overview',
    'cmip6.atmos.radiation.shortwave_radiation.name',
    'cmip6.atmos.radiation.shortwave_radiation.spectral_integration',
    'cmip6.atmos.radiation.shortwave_radiation.transport_calculation',
    'cmip6.atmos.radiation.shortwave_radiation.spectral_intervals',
    'cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity',
    'cmip6.atmos.radiation.shortwave_GHG.ODS',
    'cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases',
    'cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions',
    'cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation',
    'cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods',
    'cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions',
    'cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation',
    'cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods',
    'cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity',
    'cmip6.atmos.radiation.shortwave_aerosols.general_interactions',
    'cmip6.atmos.radiation.shortwave_aerosols.physical_representation',
    'cmip6.atmos.radiation.shortwave_aerosols.optical_methods',
    'cmip6.atmos.radiation.shortwave_gases.general_interactions',
    'cmip6.atmos.radiation.longwave_radiation.overview',
    'cmip6.atmos.radiation.longwave_radiation.name',
    'cmip6.atmos.radiation.longwave_radiation.spectral_integration',
    'cmip6.atmos.radiation.longwave_radiation.transport_calculation',
    'cmip6.atmos.radiation.longwave_radiation.spectral_intervals',
    'cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity',
    'cmip6.atmos.radiation.longwave_GHG.ODS',
    'cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases',
    'cmip6.atmos.radiation.longwave_cloud_ice.general_interactions',
    'cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation',
    'cmip6.atmos.radiation.longwave_cloud_ice.optical_methods',
    'cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions',
    'cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation',
    'cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods',
    'cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity',
    'cmip6.atmos.radiation.longwave_aerosols.general_interactions',
    'cmip6.atmos.radiation.longwave_aerosols.physical_representation',
    'cmip6.atmos.radiation.longwave_aerosols.optical_methods',
    'cmip6.atmos.radiation.longwave_gases.general_interactions',
    'cmip6.atmos.turbulence_convection.overview',
    'cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name',
    'cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type',
    'cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order',
    'cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient',
    'cmip6.atmos.turbulence_convection.deep_convection.scheme_name',
    'cmip6.atmos.turbulence_convection.deep_convection.scheme_type',
    'cmip6.atmos.turbulence_convection.deep_convection.scheme_method',
    'cmip6.atmos.turbulence_convection.deep_convection.processes',
    'cmip6.atmos.turbulence_convection.deep_convection.microphysics',
    'cmip6.atmos.turbulence_convection.shallow_convection.scheme_name',
    'cmip6.atmos.turbulence_convection.shallow_convection.scheme_type',
    'cmip6.atmos.turbulence_convection.shallow_convection.scheme_method',
    'cmip6.atmos.turbulence_convection.shallow_convection.processes',
    'cmip6.atmos.turbulence_convection.shallow_convection.microphysics',
    'cmip6.atmos.microphysics_precipitation.overview',
    'cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name',
    'cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors',
    'cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name',
    'cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes',
    'cmip6.atmos.cloud_scheme.overview',
    'cmip6.atmos.cloud_scheme.name',
    'cmip6.atmos.cloud_scheme.atmos_coupling',
    'cmip6.atmos.cloud_scheme.uses_separate_treatment',
    'cmip6.atmos.cloud_scheme.processes',
    'cmip6.atmos.cloud_scheme.prognostic_scheme',
    'cmip6.atmos.cloud_scheme.diagnostic_scheme',
    'cmip6.atmos.cloud_scheme.prognostic_variables',
    'cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method',
    'cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity',
    'cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type',
    'cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name',
    'cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order',
    'cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling',
    'cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type',
    'cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name',
    'cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order',
)

for _property_id in _ATMOS_PROPERTY_IDS:
    # PROPERTY ID - DO NOT EDIT the id strings above!
    DOC.set_id(_property_id)
    # PROPERTY VALUE:
    # Set as follows: DOC.set_value("value")
    # TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <font color='blue'> Notice
Step2: After you have verified the project information, you can execute the pipeline. When the job is done, you will see the log information returned from the cluster.
Step3: Run the pipeline with the specific operation.
Step4: To check the processing status
Step5: To delete the cluster, you just need to set the cluster name and call the below function.
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
sys.path.append(os.getcwd().replace("notebooks", "cfncluster"))
## S3 input and output address.
s3_input_files_address = "s3://path/to/input folder"
s3_output_files_address = "s3://path/to/output folder"
## CFNCluster name
your_cluster_name = "cluster_name"
## The private key pair for accessing cluster.
private_key = "/path/to/private_key.pem"
## If delete cfncluster after job is done.
delete_cfncluster = False
import CFNClusterManager, ConnectionManager
## Create a new cluster
master_ip_address = CFNClusterManager.create_cfn_cluster(cluster_name=your_cluster_name)
ssh_client = ConnectionManager.connect_master(hostname=master_ip_address,
username="ec2-user",
private_key_file=private_key)
import PipelineManager
## You can call this function to check the disease names included in the annotation.
PipelineManager.check_disease_name()
## Define the disease name from the below list of disease names.
disease_name = "BreastCancer"
import PipelineManager
## define operation
## calculate: calculate correlation;"
## oslom_cluster: clustering the gene moudules;"
## print_oslom_cluster_json: print json files;"
## all: run all operations;"
operation = "all"
## run the pipeline
PipelineManager.run_analysis(ssh_client, disease_name, operation, s3_input_files_address, s3_output_files_address)
import PipelineManager
PipelineManager.check_processing_status(ssh_client)
import CFNClusterManager
if delete_cfncluster == True:
CFNClusterManager.delete_cfn_cluster(cluster_name=your_cluster_name)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def solve(N):
    """Return the binary representation (no '0b' prefix) of the digit sum of N."""
    digit_sum = sum(map(int, str(N)))
    return format(digit_sum, 'b')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1
Step2: Create a figure showing the 3 histograms (original & 2 sets of noise corrupted data – use
Step4: Take a subset of P = 100 observations and estimate the probability density p̂ of intensities
Step6: $P(\underline{x}) = \frac{1}{h^n} \frac{1}{p} \Sigma_{\alpha=1}^{p} H(\frac{\underline{x} - \underline{x}^{(\alpha)}}{h})$
Step7: Calculate the negative log-likelihood per datapoint of your estimator using 5000
Step8: 2) Repeat this procedure (without plotting) for a sequence of kernel widths h to get the mean
Step9: not plotted points have value = inf because
Step11: (c) Repeat the previous steps (a & b) for the Gaussian kernel with σ^2 = h.
Step12: Exercise 2
Step14: 1.2 Run Expectation-Maximization algorithm
Step15: 1.3 Run K-means algorithm
Step16: K-means clusters the data point by establishing a straight separation line. This cannot fully capture the nature of the data, e.g. the points around the lower left Gaussian, which actually belong to the upper right Gaussian.
Step17: 1.5 Repeat analysis for different $\sigma_1$ values
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
import matplotlib.pyplot as plt
%matplotlib inline
import scipy.stats
import numpy as np
from scipy.ndimage import imread
import sys
# import image
img_orig = imread('testimg.jpg').flatten()
print("$img_orig")
print("shape: \t\t", img_orig.shape) # = vector
print("values: \t from ", img_orig.min(), " to ", img_orig.max(), "\n")
# "img" holds 3 vectors
img = np.zeros((3,img_orig.shape[0]))
print("$img")
print("shape: \t\t",img.shape)
std = [0, 0.05, 0.1]
for i in range(img.shape[1]):
# normalize => img[0]
img[0][i] = img_orig[i] / 255
# gaussian noise => img[1] img[2]
img[1][i] = img[0][i] + np.random.normal(0, std[1])
img[2][i] = img[0][i] + np.random.normal(0, std[2])
print(img[:, 0:4])
# histograms
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
for i, ax in enumerate(axes.flatten()):
plt.sca(ax)
plt.hist(img[i], 100, normed=1, alpha=0.75)
plt.xlim(-0.1, 1.1)
plt.ylim(0, 18)
plt.xlabel("value")
plt.ylabel("probability")
plt.title('img[{}]'.format(i))
# divide probablity space in 100 bins
nbins = 100
bins = np.linspace(0, 1, nbins+1)
# holds data equivalent to shown histograms (but cutted from 0 to 1)
elementsPerBin = np.zeros((3,nbins))
for i in range(3):
ind = np.digitize(img[i], bins)
elementsPerBin[i] = [len(img[i][ind == j]) for j in range(nbins)]
# counts number of elements from bin '0' to bin 'j'
sumUptoBinJ = np.asarray([[0 for i in range(nbins)] for i in range(3)])
for i in range(3):
for j in range(nbins):
sumUptoBinJ[i][j] = np.sum(elementsPerBin[i][0:j+1])
# plot
plt.figure(figsize=(15, 5))
for i in range(3):
plt.plot(sumUptoBinJ[i], '.-')
plt.legend(['img[0]', 'img[1]', 'img[2]'])
plt.xlabel('bin')
plt.ylabel('empirical distribution functions');
def H(vec, h):
    """(Rectangular) histogram kernel function.

    vec: array-like of scaled distances (x - x_a) / h
    h:   bandwidth (unused here; kept so every kernel shares one signature)

    Returns an int array with 1 where |v| < 0.5 and 0 elsewhere.
    (The original docstring lost its quotes, leaving a bare expression
    that was a SyntaxError; restored as a proper docstring.)
    """
    vec = np.asarray(vec)
    return np.asarray([1 if abs(x)<.5 else 0 for x in vec])
def P_est(x, h, data, kernel = H):
    """Kernel density estimate at point x.

    Implements P(x) = (1 / h**n) * (1 / p) * sum_a kernel((x_a - x) / h).

    x:      evaluation point
    h:      kernel bandwidth
    data:   1-d array of p observations
    kernel: kernel taking (scaled distances, h); default rectangular H

    (The original docstring lost its quotes, making the body a bare
    expression and a SyntaxError; restored as a proper docstring.)
    """
    n = 1 #= data.shape[1] #number of dimensions (for multidimensional data)
    p = len(data)
    return 1/(h**n)/p*np.sum(kernel((data - x)/h, h))
# take 10 data sets with 100 observations (indexes 100k to 101k)
# nomenclature: data_3(3, 10, 100) holds 3 times data(10, 100)
P = 100
offset = int(100000)
data_3 = np.zeros((3, 10,P))
for j in range(3):
for i in range(10):
data_3[j][i] = img[j][offset+i*P:offset+(i+1)*P]
print(data_3.shape)
# calculate probability estimation for (center +- h/2) on the 10 data sets
h = .15
nCenters = 101
Centers = np.linspace(0,1,nCenters)
fig, ax = plt.subplots(2,5,figsize=(15,6))
ax = ax.ravel()
for i in range(10):
ax[i].plot([P_est(center,h,data_3[0][i]) for center in Centers])
testdata = img[0][50000:55000]
# calculate average negative log likelihood for
def avg_NegLL(data, h, kernel=H):
    """Average negative log-likelihood of the global `testdata` under the
    kernel density estimates built from the sample sets in `data`.

    data:   indexable holding 10 sample sets (rows of data_3 / data_3b)
    h:      kernel bandwidth forwarded to P_est
    kernel: kernel function used by P_est (default: rectangular kernel H)

    Returns the negative log-likelihood averaged over the 10 sets.
    """
    sys.stdout.write(".")  # progress marker: one dot per evaluated bandwidth
    average = 0
    # NOTE(review): hard-coded 10 assumes data always holds exactly 10 sets
    # (true for data_3/data_3b above) -- confirm before reusing elsewhere.
    for i in range(10):
        # log density of every held-out test point under the estimate from set i
        L_prob = [np.log(P_est(x,h,data[i],kernel)) for x in testdata]
        negLL = -1*np.sum(L_prob)
        average += negLL
    average /= 10
    return average
hs = np.linspace(0.001, 0.999, 20)
def plot_negLL(data_3=data_3, kernel=H):
    """Plot the mean negative log-likelihood vs. kernel width for all 3 images.

    data_3: (3, 10, P) array of sample sets; the default binds the global
            data_3 at definition time
    kernel: kernel function forwarded to avg_NegLL / P_est

    Uses the global `hs` as the grid of kernel widths and shows the figure.
    """
    fig = plt.figure(figsize=(12,8))
    for j in range(3):  # one curve per image (original, low noise, high noise)
        print("calc data[{}]".format(j))
        LLs = [avg_NegLL(data_3[j],h,kernel=kernel) for h in hs]
        plt.plot(hs,LLs)
        print()
    plt.legend(['img[0]', 'img[1]', 'img[2]'])
    plt.show()
plot_negLL()
P = 500
data_3b = np.zeros((3, 10,P))
for j in range(3):
for i in range(10):
data_3b[j][i] = img[j][offset+i*P:offset+(i+1)*P]
plot_negLL(data_3=data_3b)
def Gaussian(x,h):
    """Gaussian kernel function with variance h (i.e. sigma^2 = h).

    (The original docstring lost its quotes, leaving a bare expression
    that was a SyntaxError; restored as a proper docstring.)
    """
    return np.exp(-x**2/h/2)/np.sqrt(2*np.pi*h)
fig, ax = plt.subplots(2,5,figsize=(15,6))
h = .15
ax = ax.ravel()
for i in range(10):
ax[i].plot([P_est(center,h,data_3[0][i],kernel=Gaussian) for center in Centers])
hs = np.linspace(0.001, 0.4, 20)
plot_negLL(kernel=Gaussian)
plot_negLL(data_3=data_3b, kernel=Gaussian)
M = 2
w1, w2 = [2,2], [1,1] # means
sigma2 = 0.2 # standard deviations
N = 100
P1, P2 = 2/3, 1/3
def create_data(sigma1=0.7):
    """Draw N points from a two-component isotropic Gaussian mixture.

    sigma1: standard deviation of the first component; the second component
            uses the global sigma2.

    Returns (X, which_gaussian): X is (N, 2); which_gaussian[n] is 0 or 1
    marking the component that generated point n.
    Uses the globals N, P1, w1, w2, sigma2.
    """
    X = np.zeros((N, 2))
    which_gaussian = np.zeros(N)
    for n in range(N):
        if np.random.rand() < P1: # sample from first Gaussian
            X[n] = np.random.multivariate_normal(w1, np.eye(len(w1)) * sigma1**2)
            which_gaussian[n] = 0
        else: # sample from second Gaussian
            X[n] = np.random.multivariate_normal(w2, np.eye(len(w2)) * sigma2**2)
            which_gaussian[n] = 1
    return X, which_gaussian
sigma1 = 0.7
X, which_gaussian = create_data(sigma1)
def plot_data(X, which_gaussian, centers, stds):
    """Scatter-plot the two clusters with their centers and 1-sigma circles.

    X:              (N, 2) data points
    which_gaussian: per-point cluster label (0 or 1)
    centers:        two cluster centers, e.g. [w1, w2]
    stds:           two standard deviations (radii of the drawn circles)
    """
    plt.scatter(*X[which_gaussian == 0].T, c='r', label='Cluster 1')
    plt.scatter(*X[which_gaussian == 1].T, c='b', label='Cluster 2')
    plt.plot(centers[0][0], centers[0][1], 'k+', markersize=15, label='Centers')
    plt.plot(centers[1][0], centers[1][1], 'k+', markersize=15)
    # circles visualize one standard deviation around each center
    plt.gca().add_artist(plt.Circle(centers[0], stds[0], ec='k', fc='none'))
    plt.gca().add_artist(plt.Circle(centers[1], stds[1], ec='k', fc='none'))
    plt.xlabel('x1')
    plt.ylabel('x2')
    plt.legend()
plot_data(X, which_gaussian, [w1, w2], [sigma1, sigma2])
plt.title('Ground truth')
from scipy.stats import multivariate_normal
def variance(X):
    """Calculate a single (scalar) variance value for the vectors in X.

    Returns the mean squared Euclidean distance of each row of X from
    the column-wise mean of X.
    (The original docstring lost its quotes, leaving a bare expression
    that was a SyntaxError; restored as a proper docstring.)
    """
    mu = X.mean(axis=0)
    return np.mean([np.linalg.norm(x - mu)**2 for x in X])
def run_expectation_maximization(X, w=None, sigma_squared=None, verbose=False):
    """Fit a two-component isotropic Gaussian mixture to X with EM.

    X:             (N, 2) data matrix; uses the globals M (=2 components)
                   and N (number of points) for array sizes.
    w:             optional (M, 2) initial centers; random perturbations
                   of the data mean if None.
    sigma_squared: optional length-M initial variances; random
                   perturbations of the overall data variance if None.
    verbose:       if True, print per-iteration diagnostics.

    Returns (labels, centers, stds, n_steps): hard assignments (argmax of
    the posterior), final centers, sqrt of the final variances, and the
    number of EM iterations performed.
    """
    # Initialization.
    P_prior = np.ones(2) * 1 / M
    P_likelihood = np.zeros((N, M))
    P_posterior = np.zeros((M, N))
    mu = X.mean(axis=0)  # mean of the original data
    var = variance(X)  # variance of the original data
    if w is None:
        w = np.array([mu + np.random.rand(M) - 0.5, mu + np.random.rand(M) - 0.5])
    if sigma_squared is None:
        sigma_squared = np.array([var + np.random.rand() - 0.5,var + np.random.rand() - 0.5])
        #sigma_squared = np.array([var, var])
    if verbose:
        print('Initial centers:', w)
        print('Initial variances:', sigma_squared)
        print()
        print()
    # Stop once the largest parameter change in one iteration drops below theta.
    theta = 0.001
    distance = np.inf
    step = 0
    # Optimization loop.
    while distance > theta:
    #for i in range(1):
        step += 1
        if verbose:
            print('Step', step)
            print('-'*50)
        # Store old parameter values to calculate distance later on.
        w_old = w.copy()
        sigma_squared_old = sigma_squared.copy()
        P_prior_old = P_prior.copy()
        if verbose:
            print('Distances of X[0] to proposed centers:', np.linalg.norm(X[0] - w[0]), np.linalg.norm(X[0] - w[1]))
        # E-Step: Calculate likelihood for each data point.
        # Scalar covariance argument -> isotropic Gaussian sigma_squared[q] * I.
        for (alpha, q), _ in np.ndenumerate(P_likelihood):
            P_likelihood[alpha, q] = multivariate_normal.pdf(X[alpha], w[q], sigma_squared[q])
        if verbose:
            print('Likelihoods of X[0]:', P_likelihood[0])
        # E-Step: Calculate assignment probabilities (posterior) for each data point.
        for (q, alpha), _ in np.ndenumerate(P_posterior):
            P_posterior[q, alpha] = (P_likelihood[alpha, q] * P_prior[q]) / np.sum([P_likelihood[alpha, r] * P_prior[r] for r in range(M)])
        if verbose:
            print('Assignment probabilities of X[0]:', P_posterior[:, 0])
            print()
        distance = 0
        # M-Step: Calculate new parameter values.
        for q in range(M):
            w[q] = np.sum([P_posterior[q, alpha] * X[alpha] for alpha in range(N)], axis=0) / np.sum(P_posterior[q])
            #print(np.sum([P_posterior[q, alpha] * X[alpha] for alpha in range(N)], axis=0))
            #print(np.sum(P_posterior[q]))
            w_distance = np.linalg.norm(w[q] - w_old[q])
            if verbose:
                print('Distance of centers:', w_distance)
            distance = max(distance, w_distance)
            # NOTE(review): the 1/M factor only matches the usual 1/dim
            # normalization because M == 2 == data dimension here -- confirm
            # before changing M or the dimensionality.
            sigma_squared[q] = 1 / M * np.sum([np.linalg.norm(X[alpha] - w_old[q])**2 * P_posterior[q, alpha] for alpha in range(N)]) / np.sum(P_posterior[q])
            sigma_squared_distance = np.abs(sigma_squared[q] - sigma_squared_old[q])
            if verbose:
                print('Distance of variances:', sigma_squared_distance)
            distance = max(distance, sigma_squared_distance)
            P_prior[q] = np.mean(P_posterior[q])
            P_prior_distance = np.abs(P_prior[q] - P_prior_old[q])
            if verbose:
                print('Distance of priors:', P_prior_distance)
            distance = max(distance, P_prior_distance)
        if verbose:
            print('Maximum distance:', distance)
            print()
            print('New centers:', w)
            print('New variances:', sigma_squared)
            print('New priors:', P_prior)
            print('='*50)
            print()
    which_gaussian_EM = P_posterior.argmax(axis=0)
    return which_gaussian_EM, w, np.sqrt(sigma_squared), step
which_gaussian_em, cluster_centers_em, cluster_stds_em, num_steps_em = run_expectation_maximization(X, verbose=True)
plot_data(X, which_gaussian_em, cluster_centers_em, cluster_stds_em)
plt.title('Predicted by Expectation-Maximization')
from sklearn.cluster import KMeans
def run_k_means(X):
    """Cluster X into two groups with K-means.

    Returns (labels, cluster_centers, per-cluster standard deviations),
    where the deviations use the scalar variance() helper defined above.
    """
    model = KMeans(2)
    model.fit(X)
    labels = model.predict(X)
    stds = np.array([np.sqrt(variance(X[labels == c])) for c in (0, 1)])
    return labels, model.cluster_centers_, stds
which_gaussian_km, cluster_centers_km, cluster_stds_km = run_k_means(X)
plot_data(X, which_gaussian_km, cluster_centers_km, cluster_stds_km)
plt.title('Predicted by K-Means')
_, _, _, num_steps_em_km = run_expectation_maximization(X, cluster_centers_km, cluster_stds_km**2)
print('Took', num_steps_em, 'steps with random initalization')
print('Took', num_steps_em_km, 'steps with initialization from K-means')
sigma1s = [0.1, 0.5, 1, 1.5]
fig, axes = plt.subplots(len(sigma1s), 3, figsize=(15, 15), sharex=True, sharey=True)
for i, (sigma1, horizontal_axes) in enumerate(zip(sigma1s, axes)):
X, which_gaussian = create_data(sigma1)
plt.sca(horizontal_axes[0])
plot_data(X, which_gaussian, [w1, w2], [sigma1, sigma2])
if i == 0:
plt.title('Ground truth')
which_gaussian_em, cluster_centers_em, cluster_stds_em, num_steps_em = run_expectation_maximization(X)
plt.sca(horizontal_axes[1])
plot_data(X, which_gaussian_em, cluster_centers_em, cluster_stds_em)
if i == 0:
plt.title('Predicted by Expectation-Maximization')
which_gaussian_km, cluster_centers_km, cluster_stds_km = run_k_means(X)
plt.sca(horizontal_axes[2])
plot_data(X, which_gaussian_km, cluster_centers_km, cluster_stds_km)
if i == 0:
plt.title('Predicted by K-Means')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Looking at the above plot, it is clear that when the sample size is small, there is greater variation in the average (compare how jagged and jumpy the average is initially, then how it smooths out). All three paths approach the value 4.5, but just flirt with it as $N$ gets large. Mathematicians and statisticians have another name for flirting
Step3: As expected, the expected distance between our sample average and the actual expected value shrinks as $N$ grows large. But also notice that the rate of convergence decreases, that is, we need only 10 000 additional samples to move from 0.020 to 0.015, a difference of 0.005, but 20 000 more samples to again decrease from 0.015 to 0.010, again only a 0.005 decrease.
Step4: What does this all have to do with Bayesian statistics?
Step5: What do we observe? Without accounting for population sizes we run the risk of making an enormous inference error
Step6: Not at all uniform over 100 to 1500. This is an absolute failure of the Law of Large Numbers.
Step8: The above is a classic phenomenon in statistics. I say classic referring to the "shape" of the scatter plot above. It follows a classic triangular form, that tightens as we increase the sample size (as the Law of Large Numbers becomes more exact).
Step10: For a given true upvote ratio $p$ and $N$ votes, the number of upvotes will look like a Binomial random variable with parameters $p$ and $N$. (This is because of the equivalence between upvote ratio and probability of upvoting versus downvoting, out of $N$ possible votes/trials). We create a function that performs Bayesian inference on $p$, for a particular comment's upvote/downvote pair.
Step11: Below are the resulting posterior distributions.
Step12: Some distributions are very tight, others have very long tails (relatively speaking), expressing our uncertainty with what the true upvote ratio might be.
Step13: The best submissions, according to our procedure, are the submissions that are most-likely to score a high percentage of upvotes. Visually those are the submissions with the 95% least plausible value close to 1.
Step14: We can view the ordering visually by plotting the posterior mean and bounds, and sorting by the lower bound. In the plot below, notice that the left error-bar is sorted (as we suggested this is the best way to determine an ordering), so the means, indicated by dots, do not follow any strong pattern.
Step15: In the graphic above, you can see why sorting by mean would be sub-optimal.
Step16: 2. The following table was located in the paper "Going for Three
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
from IPython.core.pylabtools import figsize
import matplotlib.pyplot as plt
figsize(12.5, 5)
import pymc as pm
sample_size = 100000
expected_value = lambda_ = 4.5
poi = pm.rpoisson
N_samples = range(1, sample_size, 100)
for k in range(3):
samples = poi(lambda_, size=sample_size)
partial_average = [samples[:i].mean() for i in N_samples]
plt.plot(N_samples, partial_average, lw=1.5, label="average \
of $n$ samples; seq. %d" % k)
plt.plot(N_samples, expected_value * np.ones_like(partial_average),
ls="--", label="true expected value", c="k")
plt.ylim(4.35, 4.65)
plt.title("Convergence of the average of \n random variables to its \
expected value")
plt.ylabel("average of $n$ samples")
plt.xlabel("# of samples, $n$")
plt.legend();
figsize(12.5, 4)
N_Y = 250 # use this many to approximate D(N)
N_array = np.arange(1000, 50000, 2500) # use this many samples in the approx. to the variance.
D_N_results = np.zeros(len(N_array))
lambda_ = 4.5
expected_value = lambda_ # for X ~ Poi(lambda) , E[ X ] = lambda
def D_N(n):
    """Approximate D(n), the expected distance between the average of n
    Poisson samples and the true expected value.

    Draws N_Y independent length-n sample sequences and returns the
    root-mean-square deviation of their averages from expected_value.
    Uses the globals poi, lambda_, N_Y and expected_value.
    (The original docstring lost its quotes, leaving a bare expression
    that was a SyntaxError; restored as a proper docstring.)
    """
    Z = poi(lambda_, size=(n, N_Y))
    average_Z = Z.mean(axis=0)
    return np.sqrt(((average_Z - expected_value) ** 2).mean())
for i, n in enumerate(N_array):
D_N_results[i] = D_N(n)
plt.xlabel("$N$")
plt.ylabel("expected squared-distance from true value")
plt.plot(N_array, D_N_results, lw=3,
label="expected distance between\n\
expected value and \naverage of $N$ random variables.")
plt.plot(N_array, np.sqrt(expected_value) / np.sqrt(N_array), lw=2, ls="--",
label=r"$\frac{\sqrt{\lambda}}{\sqrt{N}}$")
plt.legend()
plt.title("How 'fast' is the sample average converging? ");
import pymc as pm
N = 10000
print(np.mean([pm.rexponential(0.5) > 10 for i in range(N)]))
figsize(12.5, 4)
std_height = 15
mean_height = 150
n_counties = 5000
pop_generator = pm.rdiscrete_uniform
norm = pm.rnormal
# generate some artificial population numbers
population = pop_generator(100, 1500, size=n_counties)
average_across_county = np.zeros(n_counties)
for i in range(n_counties):
# generate some individuals and take the mean
average_across_county[i] = norm(mean_height, 1. / std_height ** 2,
size=population[i]).mean()
# located the counties with the apparently most extreme average heights.
i_min = np.argmin(average_across_county)
i_max = np.argmax(average_across_county)
# plot population size vs. recorded average
plt.scatter(population, average_across_county, alpha=0.5, c="#7A68A6")
plt.scatter([population[i_min], population[i_max]],
[average_across_county[i_min], average_across_county[i_max]],
s=60, marker="o", facecolors="none",
edgecolors="#A60628", linewidths=1.5,
label="extreme heights")
plt.xlim(100, 1500)
plt.title("Average height vs. County Population")
plt.xlabel("County Population")
plt.ylabel("Average height in county")
plt.plot([100, 1500], [150, 150], color="k", label="true expected \
height", ls="--")
plt.legend(scatterpoints=1);
print("Population sizes of 10 'shortest' counties: ")
print(population[np.argsort(average_across_county)[:10]])
print("\nPopulation sizes of 10 'tallest' counties: ")
print(population[np.argsort(-average_across_county)[:10]])
figsize(12.5, 6.5)
data = np.genfromtxt("./data/census_data.csv", skip_header=1,
delimiter=",")
plt.scatter(data[:, 1], data[:, 0], alpha=0.5, c="#7A68A6")
plt.title("Census mail-back rate vs Population")
plt.ylabel("Mail-back rate")
plt.xlabel("population of block-group")
plt.xlim(-100, 15e3)
plt.ylim(-5, 105)
i_min = np.argmin(data[:, 0])
i_max = np.argmax(data[:, 0])
plt.scatter([data[i_min, 1], data[i_max, 1]],
[data[i_min, 0], data[i_max, 0]],
s=60, marker="o", facecolors="none",
edgecolors="#A60628", linewidths=1.5,
label="most extreme points")
plt.legend(scatterpoints=1);
# adding a number to the end of the %run call with get the ith top photo.
%run top_showerthoughts_submissions.py 2
print("Post contents: \n")
print(top_post)
contents: an array of the text from the last 100 top submissions to a subreddit
votes: a 2d numpy array of upvotes, downvotes for each submission.
n_submissions = len(votes)
submissions = np.random.randint( n_submissions, size=4)
print("Some Submissions (out of %d total) \n-----------"%n_submissions)
for i in submissions:
print('"' + contents[i] + '"')
print("upvotes/downvotes: ",votes[i,:], "\n")
import pymc as pm
def posterior_upvote_ratio(upvotes, downvotes, samples=20000):
    """Return posterior samples of a submission's true upvote ratio.

    Accepts the number of upvotes and downvotes a particular submission
    received, and the number of posterior samples to return to the user.
    Assumes a uniform prior on the ratio.

    (Fix: the extraction had dropped the docstring's triple quotes, which
    made the function a syntax error; restored here.)
    """
    N = upvotes + downvotes
    upvote_ratio = pm.Uniform("upvote_ratio", 0, 1)
    observations = pm.Binomial("obs", N, upvote_ratio, value=upvotes, observed=True)
    # do the fitting; first do a MAP as it is cheap and useful.
    map_ = pm.MAP([upvote_ratio, observations]).fit()
    mcmc = pm.MCMC([upvote_ratio, observations])
    # Burn the first quarter of the chain. Integer division: under
    # Python 3 `samples / 4` would hand PyMC a float burn-in count.
    mcmc.sample(samples, samples // 4)
    return mcmc.trace("upvote_ratio")[:]
figsize(11., 8)
posteriors = []
colours = ["#348ABD", "#A60628", "#7A68A6", "#467821", "#CF4457"]
for i in range(len(submissions)):
j = submissions[i]
posteriors.append(posterior_upvote_ratio(votes[j, 0], votes[j, 1]))
plt.hist(posteriors[i], bins=18, normed=True, alpha=.9,
histtype="step", color=colours[i % 5], lw=3,
label='(%d up:%d down)\n%s...' % (votes[j, 0], votes[j, 1], contents[j][:50]))
plt.hist(posteriors[i], bins=18, normed=True, alpha=.2,
histtype="stepfilled", color=colours[i], lw=3, )
plt.legend(loc="upper left")
plt.xlim(0, 1)
plt.title("Posterior distributions of upvote ratios on different submissions");
N = posteriors[0].shape[0]
lower_limits = []
for i in range(len(submissions)):
j = submissions[i]
plt.hist(posteriors[i], bins=20, normed=True, alpha=.9,
histtype="step", color=colours[i], lw=3,
label='(%d up:%d down)\n%s...' % (votes[j, 0], votes[j, 1], contents[j][:50]))
plt.hist(posteriors[i], bins=20, normed=True, alpha=.2,
histtype="stepfilled", color=colours[i], lw=3, )
v = np.sort(posteriors[i])[int(0.05 * N)]
# plt.vlines( v, 0, 15 , color = "k", alpha = 1, linewidths=3 )
plt.vlines(v, 0, 10, color=colours[i], linestyles="--", linewidths=3)
lower_limits.append(v)
plt.legend(loc="upper left")
plt.legend(loc="upper left")
plt.title("Posterior distributions of upvote ratios on different submissions");
order = np.argsort(-np.array(lower_limits))
print(order, lower_limits)
def intervals(u, d):
    """Approximate posterior mean and scaled standard error of an
    upvote ratio, given `u` upvotes and `d` downvotes.

    With a uniform prior, the posterior is Beta(u + 1, d + 1); the
    returned error is 1.65 standard deviations of that Beta, so
    `mean - err` is roughly a 95% lower bound on the true ratio.
    Works element-wise on NumPy arrays as well as on scalars.
    """
    alpha = u + 1.
    beta = d + 1.
    total = alpha + beta
    mean = alpha / total
    err = 1.65 * np.sqrt(alpha * beta / (total * total * (total + 1.)))
    return (mean, err)
print("Approximate lower bounds:")
posterior_mean, std_err = intervals(votes[:, 0], votes[:, 1])
lb = posterior_mean - std_err
print(lb)
print("\n")
print("Top 40 Sorted according to approximate lower bounds:")
print("\n")
order = np.argsort(-lb)
ordered_contents = []
for i in order[:40]:
ordered_contents.append(contents[i])
print(votes[i, 0], votes[i, 1], contents[i])
print("-------------")
r_order = order[::-1][-40:]
plt.errorbar(posterior_mean[r_order], np.arange(len(r_order)),
xerr=std_err[r_order], capsize=0, fmt="o",
color="#7A68A6")
plt.xlim(0.3, 1)
plt.yticks(np.arange(len(r_order) - 1, -1, -1), map(lambda x: x[:30].replace("\n", ""), ordered_contents));
# Enter code here
import scipy.stats as stats
exp = stats.expon(scale=4)
N = int(1e5)
X = exp.rvs(N)
# ...
from IPython.core.display import HTML
def css_styling():
    """Load the notebook's custom CSS file and return it wrapped in an
    IPython ``HTML`` object so the notebook renders it.

    Fix: use a context manager so the file handle is closed promptly;
    the original left it open until garbage collection.
    """
    with open("../styles/custom.css", "r") as f:
        styles = f.read()
    return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nasa-giss', 'giss-e2-1h', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Factorial
Step4: Write a function that computes the factorial of small numbers using a Python loop.
Step5: Use the %timeit magic to time both versions of this function for an argument of 50. The syntax for %timeit is
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
def np_fact(n):
    """Compute n! = n*(n-1)*...*1 using Numpy.

    (Fix: the extraction had dropped the docstring's triple quotes,
    which made the bare text a syntax error; restored here.)

    NOTE(review): uses NumPy's default integer dtype, so on 64-bit
    platforms the result silently overflows for n > 20 — same
    behaviour as the original.
    """
    if n == 0:
        return 1
    else:
        a = np.arange(1, n + 1, 1)
        # Running products; the last entry of the cumulative product is n!.
        b = a.cumprod(0)
        return b[n - 1]
assert np_fact(0)==1
assert np_fact(1)==1
assert np_fact(10)==3628800
assert [np_fact(i) for i in range(0,11)]==[1,1,2,6,24,120,720,5040,40320,362880,3628800]
def loop_fact(n):
    """Compute n! using a Python for loop.

    (Fix: the extraction had dropped the docstring's triple quotes,
    which made the bare text a syntax error; restored here.)

    Uses arbitrary-precision Python ints, so it does not overflow.
    """
    if n == 0:
        return 1
    else:
        factorial = 1
        for i in range(1, n + 1):
            factorial *= i
        return factorial
assert loop_fact(0)==1
assert loop_fact(1)==1
assert loop_fact(10)==3628800
assert [loop_fact(i) for i in range(0,11)]==[1,1,2,6,24,120,720,5040,40320,362880,3628800]
%timeit -n1 -r1 np_fact(100)
%timeit -n1 -r1 loop_fact(100)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hack that allows to parse ltl3ba automata without universal branching.
Step2: $\newcommand{\F}{\mathsf{F}}$
Step3: Literature
Step4: Mergeable formulae
Step5: Random
Step6: Generate 1000 mergeable formulae with priorities 1,2,4
Step7: Evaluating the impact of $\F$- and $\FG$-merging
Step8: Scatter plots
|
<ASSISTANT_TASK:>
Python Code:
from ltlcross_runner import LtlcrossRunner
from IPython.display import display
import pandas as pd
import spot
import sys
spot.setup(show_default='.a')
pd.options.display.float_format = '{: .0f}'.format
pd.options.display.latex.multicolumn_format = 'c'
import os
os.environ['SPOT_HOA_TOLERANT']='TRUE'
%%bash
ltl3ba -v
ltl3tela -v
ltl2tgba --version
# If there are already files with results, and rerun is False, ltlcross is not run again.
rerun = False
def is_mergable(f, level=3):
    '''Runs ltl3tela with the -m argument to detect
    whether the given formula `f` is mergable.
    level 1: F-mergeable
    level 2: G-mergeable
    level 3: F,G-mergeable
    '''
    # Level 3 is simply the union of levels 1 and 2.
    if level == 3:
        return is_mergable(f, 2) or is_mergable(f, 1)
    # IPython shell escape (notebook-only syntax): run the external
    # ltl3tela binary; its first output line is '1' iff the formula
    # is mergeable at the requested level.
    res = !ltl3tela -m{level} -f "{f}"
    return res[0] == '1'
is_mergable('FGa',2)
tmp_file = 'formulae/tmp.ltl'
lit_pref = 'formulae/literature'
lit_file = lit_pref + '.ltl'
lit_merg_file = 'formulae/lit.ltl'
# The well-known set of formulae from literature
!genltl --dac-patterns --eh-patterns --sb-patterns --beem-patterns --hkrss-patterns > $tmp_file
# We add also negation of all the formulae.
# We remove all M and W operators as LTL3BA does not understand them.
# The `relabel-bool` option renames `G(a | b)` into `G a`.
!ltlfilt --negate $tmp_file | \
ltlfilt $tmp_file -F - --unique -r3 --remove-wm --relabel-bool=abc | \
ltlfilt -v --equivalent-to=0 | ltlfilt -v --equivalent-to=1> $lit_file
lit_f_mergable = [is_mergable(l,1) for l in open(lit_file)]
lit_mergable = [is_mergable(l,3) for l in open(lit_file)]
counts = '''Out of {} formulae known from literature, there are:
{} with F-merging,
{} with F,G-merging, and
{} with no merging possibility
'''
print(counts.format(
len(lit_mergable),
lit_f_mergable.count(True),
lit_mergable.count(True),
lit_mergable.count(False)))
with open(lit_merg_file,'w') as out:
for l in open(lit_file):
if is_mergable(l):
out.write(l)
def generate(n=100, func=(lambda x: True), filename=None, priorities='M=0,W=0,xor=0', ap=['a', 'b', 'c', 'd', 'e']):
    """Sample `n` unique random LTL formulae satisfying `func`.

    Formulae are drawn from ``spot.randltl`` over atomic propositions
    `ap` with the given operator `priorities`, relabelled to a, b, c, ...
    and with W/M unabbreviated. Trivially true/false formulae and
    duplicates are skipped. If `filename` is given, each accepted
    formula is also written there (pass ``sys.stdout`` to print).
    Returns the accepted formulae as a list (set-derived, so order is
    not the generation order).

    NOTE(review): `ap` is a mutable default argument; it is only ever
    read (passed to spot.randltl), so this is benign, but do not
    mutate it.
    """
    # Fix: the original opened the output file and never closed it,
    # risking unflushed output when external tools read the file next.
    file_h = None
    if filename is not None:
        file_h = filename if filename is sys.stdout else open(filename, 'w')
    f = spot.randltl(ap,
                     ltl_priorities=priorities,
                     simplify=3, tree_size=15).relabel_bse(spot.Abc)\
                     .unabbreviate('WM')
    i = 0
    printed = set()
    while i < n:
        form = next(f)
        if form in printed:
            continue
        if func(form) and not form.is_tt() and not form.is_ff():
            if file_h is not None:
                print(form, file=file_h)
            printed.add(form)
            i += 1
    if file_h is not None and file_h is not sys.stdout:
        file_h.close()
    return list(printed)
def measure_rand(n=1000, priorities='M=0,W=0,xor=0', ap=['a', 'b', 'c', 'd', 'e']):
    """Generate `n` random formulae, print how many are F-mergeable and
    F,G-mergeable, and return the formulae together with both flag lists.
    """
    formulae = generate(n, priorities=priorities, ap=ap)
    full_flags = [is_mergable(phi, 3) for phi in formulae]
    f_flags = [is_mergable(phi, 1) for phi in formulae]
    template = '''Out of {} random formulae, there are:
{} with F-merging,
{} with F,G-merging, and
{} with no merging possibility
'''
    n_total = len(full_flags)
    n_full = sum(full_flags)
    print(template.format(n_total, sum(f_flags), n_full, n_total - n_full))
    return formulae, f_flags, full_flags
def get_priorities(n):
    """Build the ltlcross priority string giving both F and G the
    priority `n`, disabling W, M and xor (priority 0), and leaving
    every other operator at its default priority of 1.
    """
    return f'M=0,W=0,xor=0,G={n},F={n}'
measure_rand();
measure_rand(priorities=get_priorities(2));
rand4 = measure_rand(priorities=get_priorities(4))
randfg = measure_rand(priorities='xor=0,implies=0,equiv=0,X=0,W=0,M=0,R=0,U=0,F=2,G=2')
fg_priorities = [1,2,4]
!mkdir -p formulae
#generate(total_r,filename=fg_f,priorities='xor=0,implies=0,equiv=0,X=0,W=0,M=0,R=0,U=0,F=3,G=3');
for i in fg_priorities:
generate(1000,func=lambda x:is_mergable(x,3),
filename='formulae/rand{}.ltl'.format(i),
priorities=get_priorities(i))
generate(1000,func=lambda x:is_mergable(x,3),
filename='formulae/randfg.ltl'.format(i),
priorities='xor=0,implies=0,equiv=0,X=0,W=0,M=0,R=0,U=0,F=2,G=2');
# Per-benchmark result files and LtlcrossRunner objects, keyed by
# formula-set name ('lit', 'randfg', 'rand1', ...).
resfiles = {}
runners = {}
### Tools' setting ###
# a dict of a form (name : ltlcross cmd)
ltl3tela_shared = "ltl3tela -p1 -t0 -n0 -a3 -f %f "
#end = " | awk '!p;/^--END--/{p=1}' > %O"
end = " > %O"
# Command-line variants of ltl3tela: flags -F0/-G0 disable F- and
# G-merging respectively; LTL3BA serves as the external baseline.
tools = {"FG-merging" : ltl3tela_shared + end,
         #"FG-merging+compl" : ltl3tela_shared + "-n1" + end,
         "F-merging" : ltl3tela_shared + "-G0" + end,
         #"G-merging" : ltl3tela_shared + "-F0" + end,
         "basic" : ltl3tela_shared + "-F0 -G0" + end,
         "LTL3BA" : "ltl3ba -H1 -f %s" + end,
        }
### Order in which we want to sort the translations
MI_order = ["LTL3BA",
            "basic","F-merging","FG-merging"]
### Files with measured statistics ###
resfiles['lit'] = 'MI_alt-lit.csv'
resfiles['randfg'] = 'MI_alt-randfg.csv'
for i in fg_priorities:
    resfiles['rand{}'.format(i)] = 'MI_alt-rand{}.csv'.format(i)
### Measures to be measured
cols = ["states","transitions","nondet_states","nondet_aut","acc"]
# One runner per formula file; parsed results land in the matching CSV.
for name,rfile in resfiles.items():
    runners[name] = LtlcrossRunner(tools,res_filename=rfile,
                                   formula_files=['formulae/{}.ltl'.format(name)],
                                   cols=cols)
# Run ltlcross only when `rerun` is truthy (presumably set earlier in
# the notebook -- TODO confirm); otherwise just parse the cached CSVs.
for r in runners.values():
    if rerun:
        r.run_ltlcross()
    r.parse_results()
t1 = {}
for name,r in runners.items():
tmp = r.cummulative(col=cols).unstack(level=0).loc[MI_order,cols]
t1_part = tmp.loc[:,['states','acc']]
t1_part["det. automata"] = len(r.values)-tmp.nondet_aut
t1[name] = t1_part
t1_merged = pd.concat(t1.values(),axis=1,keys=t1.keys()).loc[MI_order,:]
t1_merged
row_map={"basic" : 'basic',
"F-merging" : '$\F$-merging',
"G-merging" : '$\G$-merging',
"FG-merging" : '$\FG$-merging',
"FG-merging+compl" : "$\FG$-merging + complement"}
t1_merged.rename(row_map,inplace=True);
t1 = t1_merged.rename_axis(['',"translation"],axis=1)
t1.index.name = None
t1
rand = t1.copy()
rand.columns = rand.columns.swaplevel()
rand.sort_index(axis=1,level=1,inplace=True,sort_remaining=False,ascending=True)
idx = pd.IndexSlice
corder = ['states','acc']
parts = [rand.loc[:,idx[[c]]] for c in corder]
rand = pd.concat(parts,names=corder,axis=1)
rand
print(rand.to_latex(escape=False,bold_rows=False),file=open('fossacs_t1.tex','w'))
cp fossacs_t1.tex /home/xblahoud/research/ltl3tela_papers/
def fix_tools(tool):
    """Typeset the F-/FG- prefixes in a tool name as LaTeX macros."""
    renamed = tool.replace('FG-', '$\\FG$-')
    renamed = renamed.replace('F-', '$\\F$-')
    return renamed
def sc_plot(r,t1,t2,filename=None,include_equal = True,col='states',log=None,size=(5.5,5),kw=None,clip=None, add_count=True):
    """Build a pgfplots scatter plot comparing tools `t1` and `t2`.

    `r` is an LtlcrossRunner, or a list of runners whose `col` values are
    concatenated and de-duplicated per formula.  Equal (t1, t2) pairs are
    counted and handed to `scatter_plot`; the resulting LaTeX is written
    to `filename` if given, otherwise returned as a string.
    """
    merged = isinstance(r,list)
    if merged:
        # Stack the chosen column from all runners, drop the per-run
        # index level, and keep the first occurrence of each formula.
        vals = pd.concat([run.values[col] for run in r])
        vals.index = vals.index.droplevel(0)
        vals = vals.groupby(vals.index).first()
    else:
        vals = r.values[col]
    # Optionally discard formulae where both tools produced equal values.
    to_plot = vals.loc(axis=1)[[t1,t2]] if include_equal else\
        vals[vals[t1] != vals[t2]].loc(axis=1)[[t1,t2]]
    # Collapse identical (t1, t2) pairs into a single row with a count;
    # the count later drives the colourbar in the plot.
    to_plot['count'] = 1
    to_plot.dropna(inplace=True)
    to_plot = to_plot.groupby([t1,t2]).count().reset_index()
    if filename is not None:
        # NOTE(review): the file handle opened here is never closed.
        print(scatter_plot(to_plot, log=log, size=size,kw=kw,clip=clip, add_count=add_count),file=open(filename,'w'))
    else:
        return scatter_plot(to_plot, log=log, size=size,kw=kw,clip=clip, add_count=add_count)
def scatter_plot(df, short_toolnames=True, log=None, size=(5.5,5),kw=None,clip=None,add_count = True):
    """Render a DataFrame of (t1, t2, count) rows as pgfplots LaTeX code.

    `df` must have exactly three columns — the two tool columns and a
    `count` column (as produced by `sc_plot`).  Returns a string with a
    colour-coded scatter plot plus a y=x reference line.
    """
    t1, t2, _ = df.columns.values
    if short_toolnames:
        # Drop any '/...' suffix and typeset F-/FG- prefixes as macros.
        t1 = fix_tools(t1.split('/')[0])
        t2 = fix_tools(t2.split('/')[0])
    # One '(x,y) [count]' coordinate per row; [count] is the colour metric.
    vals = ['({},{}) [{}]\n'.format(v1,v2,c) for v1,v2,c in df.values]
    plots = '''\\addplot[
    scatter, scatter src=explicit,
    only marks, fill opacity=0.5,
    draw opacity=0] coordinates
    {{{}}};'''.format(' '.join(vals))
    # Diagonal y=x line; on a log axis it must start at 1, not 0.
    start_line = 0 if log is None else 1
    line = '\\addplot[darkgreen,domain={}:{}]{{x}};'.format(start_line, min(df.max(axis=0)[:2])+1)
    axis = 'axis'
    mins = 'xmin=0,ymin=0,'
    clip_str = ''
    if clip is not None:
        # `clip` is (xmin, ymin, xmax, ymax): highlight it in red.
        clip_str = '\\draw[red,thick] ({},{}) rectangle ({},{});'.format(*clip)
    if log:
        # Choose log-log or semi-log axis; log scales start at 1.
        if log == 'both':
            axis = 'loglogaxis'
            mins = 'xmin=1,ymin=1,'
        else:
            axis = 'semilog{}axis'.format(log)
            mins = mins + '{}min=1,'.format(log)
    args = ''
    if kw is not None:
        if 'title' in kw and add_count:
            # Append the total number of formulae to the plot title.
            kw['title'] = '{{{} ({})}}'.format(kw['title'],df['count'].sum())
        args = ['{}={},\n'.format(k,v) for k,v in kw.items()]
        args = ''.join(args)
    # Assemble the final pgfplots environment: {0}=axis kind, {1}=axis
    # minima, {2}/{3}=size, {4}/{5}=labels, {6}=extra args, {7}=scatter,
    # {8}=diagonal, {9}=clip rectangle.
    res = '''%\\begin{{tikzpicture}}
    \\pgfplotsset{{every axis legend/.append style={{
    cells={{anchor=west}},
    draw=none,
    }}}}
    \\pgfplotsset{{colorbar/width=.3cm}}
    \\pgfplotsset{{title style={{align=center,
    font=\\small}}}}
    \\pgfplotsset{{compat=1.14}}
    \\begin{{{0}}}[
    {1}
    colorbar,
    colormap={{example}}{{
    color(0)=(blue)
    color(500)=(green)
    color(1000)=(red)
    }},
    %thick,
    axis x line* = bottom,
    axis y line* = left,
    width={2}cm, height={3}cm,
    xlabel={{{4}}},
    ylabel={{{5}}},
    cycle list={{%
    {{darkgreen, solid}},
    {{blue, densely dashed}},
    {{red, dashdotdotted}},
    {{brown, densely dotted}},
    {{black, loosely dashdotted}}
    }},
    {6}%
    ]
    {7}%
    {8}%
    {9}%
    \\end{{{0}}}
    %\\end{{tikzpicture}}
    '''.format(axis,mins,
               size[0],size[1],t1,t2,
               args,plots,line,
               clip_str)
    return res
ltl3ba = 'LTL3BA'
fgm = 'FG-merging'
fm = 'F-merging'
basic = 'basic'
size = (4,4)
clip_names = ('xmin','ymin','xmax','ymax')
kw = {}
sc_plot(runners['lit'],basic,fgm,'sc_lit.tex',size=size,kw=kw.copy())
size = (4.3,4.5)
kw['title'] = 'literature'
sc_plot(runners['lit'],basic,fgm,'sc_lit.tex',size=size,kw=kw.copy())
for suff in ['1','2','4','fg']:
kw['title'] = 'rand'+suff
sc_plot(runners['rand'+suff],basic,fgm,'sc_rand{}.tex'.format(suff),size=size,kw=kw.copy())
cp sc_lit.tex sc_rand*.tex ~/research/ltl3tela_papers
r = runners['rand4']
r.smaller_than('basic','F-merging')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <hr style="border
Step2:
Step3:
Step4:
Step5:
Step6:
Step7: <hr style="border
Step8:
Step9:
Step10: <hr style="border
Step11:
Step12:
Step13:
Step14:
Step15:
Step16:
Step17: <hr style="border
Step18: <hr style="border
Step19: <hr style="border
Step20: <hr style="border
Step21: <hr style="border
Step22: <hr style="border
|
<ASSISTANT_TASK:>
Python Code:
# calculate pi
import numpy as np
# N : number of iterations
def calc_pi(N):
    """Monte Carlo estimate of pi.

    Draws N uniform points in the unit square and returns four times
    the fraction that fall inside the unit circle.
    """
    xs = np.random.ranf(N)
    ys = np.random.ranf(N)
    inside = np.sqrt(xs * xs + ys * ys) <= 1.0
    return 4 * float(inside.sum()) / float(N)
# time the results
pts = 6; N = np.logspace(1,8,num=pts);
result = np.zeros(pts); count = 0;
for n in N:
result = %timeit -o -n1 calc_pi(n)
result[count] = result.best
count += 1
# and save results to file
np.savetxt('calcpi_timings.txt', np.c_[N,results],
fmt='%1.4e %1.6e');
# import numpy as alias np
import numpy as np
# create a 1d array with a list
a = np.array( [-1,0,1] ); a
# use arrays to create arrays
b = np.array( a ); b
# use numpy functions to create arrays
# arange for arrays, range for lists!
a = np.arange( -2, 6, 2 ); a
# between start, stop, sample step points
a = np.linspace(-10, 10, 5);
a;
# Ex: can you guess these functions do?
b = np.zeros(3); print b
c = np.ones(3); print c
# Ex++: what does this do? Check documentation!
h = np.hstack( (a, a, a), 0 ); print h
# array characteristics such as:
print a
print a.ndim # dimensions
print a.shape # shape
print a.size # size
print a.dtype # data type
# can choose data type
a = np.array( [1,2,3], np.int16 ); a.dtype
# multi-dimensional arrays e.g. 2d array or matrix
# e.g. list of lists
mat = np.array( [[1,2,3], [4,5,6]]);
print mat; print mat.size; mat.shape
# join arrays along first axis (0)
d = np.r_[np.array([1,2,3]), 0, 0, [4,5,6]];
print d; d.shape
# join arrays along second axis (1)
d = np.c_[np.array([1,2,3]), [4,5,6]];
print d; d.shape
# Ex: use r_, c_ with nd (n>1) arrays
# Ex: can you guess the shape of these arrays?
h = np.array( [1,2,3,4,5,6] );
i = np.array( [[1,1],[2,2],[3,3],[4,4],[5,5],[6,6]] );
j = np.array( [[[1],[2],[3],[4],[5],[6]]] );
k = np.array( [[[[1],[2],[3],[4],[5],[6]]]] );
# reshape 1d arrays into nd arrays original matrix unaffected
mat = np.arange(6); print mat
print mat.reshape( (3, 2) )
print mat; print mat.size;
print mat.shape
# can also use the shape, this modifies the original array
a = np.zeros(10); print a
a.shape = (2,5)
print a; print a.shape;
# Ex: what do flatten() and ravel()?
# use online documentation, or '?'
mat2 = mat.flatten()
mat2 = mat.ravel()
# Ex: split a martix? Change the cuts and axis values
# need help?: np.split?
cuts=2;
np.split(mat, cuts, axis=0)
# Ex: can you guess what these functions do?
# np.copyto(b, a);
# v = np.vstack( (arr2d, arr2d) ); print v; v.ndim;
# c0 = np.concatenate( (arr2d, arr2d), axis=0); c0;
# c1 = np.concatenate(( mat, mat ), axis=1); print "c1:", c1;
# Ex++: other functions to explore
#
# stack(arrays[, axis])
# tile(A, reps)
# repeat(a, repeats[, axis])
# unique(ar[, return_index, return_inverse, ...])
# trim_zeros(filt[, trim]), fill(scalar)
# xv, yv = meshgrid(x,y)
# basic indexing and slicing we know from lists
a = np.arange(8); print a
a[3]
# a[start:stop:step] --> [start, stop every step)
print a[0:7:2]
print a[0::2]
# negative indices are valid!
# last element index is -1
print a[2:-3:2]
# basic indexing of a 2d array : take care of each dimension
nd = np.arange(12).reshape((4,3)); print nd;
print nd[2,2];
print nd[2][2];
# get corner elements 0,2,9,11
print nd[0:4:3, 0:3:2]
# Ex: get elements 7,8,10,11 that make up the bottom right corner
nd = np.arange(12).reshape((4,3));
print nd; nd[2:4, 1:3]
# slices are views (like references)
# on an array, can change elements
nd[2:4, 1:3] = -1; nd
# assign slice to a variable to prevent this
s = nd[2:4, 1:3]; print nd;
s = -1; nd
# Care - simple assignment between arrays
# creates references!
nd = np.arange(12).reshape((4,3))
md = nd
md[3] = 1000
print nd
# avoid this by creating distinct copies
# using copy()
nd = np.arange(12).reshape((4,3))
md = nd.copy()
md[3] = 999
print nd
# advanced or fancy indexing lets you do more
p = np.array( [[0,1,2], [3,4,5], [6,7,8], [9,10,11]] );
print p
rows = [0,0,3,3]; cols = [0,2,0,2];
print p[rows, cols]
# Ex: what will this slice look like?
m = np.array( [[0,-1,4,20,99], [-3,-5,6,7,-10]] );
print m[[0,1,1,1], [1,0,1,4]];
# can use conditionals in indexing
# m = np.array([[0,-1,4,20,99],[-3,-5,6,7,-10]]);
m[ m < 0 ]
# Ex: can you guess what this does? query: np.sum?
y = np.array([[0, 1], [1, 1], [2, 2]]);
rowsum = y.sum(1);
y[rowsum <= 2, :]
# Ex: and this?
a = np.arange(10);
mask = np.ones(len(a), dtype = bool);
mask[[0,2,4]] = False; print mask
result = a[mask]; result
# Ex: r=np.array([[0,1,2],[3,4,5]]);
xp = np.array( [[[1,11],[2,22],[3,33]], [[4,44],[5,55],[6,66]]] );
xp[slice(1), slice(1,3,None), slice(1)]; xp[:1, 1:3:, :1];
print xp[[1,1,1],[1,2,1],[0,1,0]]
# add an element with insert
a = np.arange(6).reshape([2,3]); print a
np.append(a, np.ones([2,3]), axis=0)
# inserting an array of elements
np.insert(a, 1, -10, axis=0)
# can use delete, or a boolean mask, to delete array elements
a = np.arange(10)
np.delete(a, [0,2,4], axis=0)
# vectorization allows element-wise operations (no for loop!)
a = np.arange(10).reshape([2,5]); b = np.arange(10).reshape([2,5]);
-0.1*a
a*b
a/(b+1) #.astype(float)
# random floats
a = np.random.ranf(10); a
# create random 2d int array
a = np.random.randint(0, high=5, size=25).reshape(5,5);
print a;
# generate sample from normal distribution
# (mean=0, standard deviation=1)
s = np.random.standard_normal((5,5)); s;
# Ex: what other ways are there to generate random numbers?
# What other distributions can you sample?
# easy way to save data to text file
pts = 5; x = np.arange(pts); y = np.random.random(pts);
# format specifiers: d = int, f = float, e = scientific
np.savetxt('savedata.txt', np.c_[x,y], header = 'DATA', footer = 'END',
fmt = '%d %1.4f')
!cat savedata.txt
# One could do ...
# p = np.loadtxt('savedata.txt')
# ...but much more flexibility with genfromtext
p = np.genfromtxt('savedata.txt', skip_header=2, skip_footer=1); p
# Ex++: what do numpy.save, numpy.load do ?
# calculate pi using polynomials
# import Polynomial class
from numpy.polynomial import Polynomial as poly;
num = 100000;
denominator = np.arange(num);
denominator[3::4] *= -1 # every other odd coefficient is -ve
numerator = np.ones(denominator.size);
# avoid dividing by zero, drop first element denominator
almost = numerator[1:]/denominator[1:];
# make even coefficients zero
almost[1::2] = 0
# add back zero coefficient
coeffs = np.r_[0,almost];
p = poly(coeffs);
4*p(1) # pi approximation
# accessing a 2d array
nd = np.arange(100).reshape((10,10))
# accessing element of 2d array
%timeit -n10000000 -r3 nd[5][5]
%timeit -n10000000 -r3 nd[(5,5)]
# Ex: multiplying two vectors
x=np.arange(10E7)
%timeit -n1 -r10 x*x
%timeit -n1 -r10 x**2
# Ex++: from the linear algebra package
%timeit -n1 -r10 np.dot(x,x)
import numpy as np
# Ex: range functions and iterating in for loops
size = int(1E6);
%timeit for x in range(size): x ** 2
# faster than range for very large arrays?
%timeit for x in xrange(size): x ** 2
%timeit for x in np.arange(size): x ** 2
%timeit np.arange(size) ** 2
# Ex: look at the calculating pi code
# Make sure you understand it. Time the code.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The ndarray structure
Step2: Array creation methods
Step3: Aggregate methods (min and max)
Step4: Summations
Step5: Transform a 1D array into a 2D array
Step6: Identity Matrix
Step7: Random Numbers
Step8: Sample from the normal distribution
Step9: Indexing and slicing
Step10: Tiling
Step11: Broadcasting
Step12: Arithmetic
Step13: Universal functions
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
my_vector = np.array([1, 2, 3, 4])
my_vector
my_vector.shape
my_vector.dtype
my_matrix = np.array([[1, 2], [3, 4]])
my_matrix
my_matrix.shape
# Find the length of each element in bytes
my_matrix.itemsize
my_matrix2 = np.array([[1, 2], [3, 4]], dtype=np.int8)
my_matrix2.itemsize
# Create an uninitialised array of specified shape and dtype
np.empty(shape=(4,4),dtype=np.int8)
np.zeros(4)
np.zeros((4,4))
np.zeros((4,4)) + 42
# Create a new zero matrix of the same shape as another matrix.
np.zeros_like(my_matrix)
np.ones(4)
np.ones((4,4))
# Similar to Python's built-in range() function
np.arange(start=0, stop=10, step=2)
# Like arange() but instead of a step size, we specify the
# number of values that we need. It generates lineary-spaced
# numbers in the given interval
np.linspace(start=10, stop=20, num=5)
# Generate numbers that are evenly spaced on a logarithmic scale
np.logspace(start=1, stop=2, num=10)
arr1 = np.array([10, 87, 86, 5, 4, 38, 94, 76, 12, 17])
arr1
arr1.max(), arr1.argmax()
arr1.min(), arr1.argmin()
arr1_copy = arr1.copy()
arr1_copy
matrix1 = np.arange(1,26).reshape(5,5)
matrix1
# Sum values in the matrix
matrix1.sum()
# Sum values by column
matrix1.sum(0)
prime_numbers = np.array([2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97])
prime_numbers
prime_numbers.reshape(5, 5)
np.eye(4)
# Use NumPy to generate four random numbers between 0 and 1
np.random.rand(4)
np.random.rand(4, 4)
np.random.randint(1, 101)
# Generates 10 random integers between 1 and 100
np.random.randint(1, 101, 10)
# Generate four numbers from the normal distribution centred around zero
np.random.randn(4)
np.random.randn(4, 4)
arr2 = np.arange(0, 101, 10)
arr2
arr2[2]
# Use Python's slice notation to fetch elements from the array
arr2[3:6]
arr2[3:]
arr2[:4]
# Boolean indexing
arr2[arr2 > 5]
arr2 > 5
arr_with_nans = np.array([np.nan, 1,2,np.nan,3,4,5])
arr_with_nans
# Get an array where NaN elements are omitted
arr_with_nans[~np.isnan(arr_with_nans)]
matrix2 = np.arange(1, 26).reshape(5,5)
matrix2
matrix2[1]
matrix2[1,2] # same as matrix[1][2]
matrix2[1:4,1:4]
# Use ellipsis to get elements from the third column
matrix2[...,2]
# Fetch elements placed at corners of the 5x5 array
rows = np.array([[0,0],[4,4]])
cols = np.array([[0,4],[0,4]])
matrix2[rows, cols]
arr3 = np.array([9, 4, 4])
arr3
np.tile(arr3, (4, 1))
np.tile(arr3, (5, 2))
macro_nutrients = np.array([[0.3, 2.5, 3.5],
[2.9, 27.5, 0],
[0.4, 1.3, 23.9],
[14.4, 6, 2.3]])
calories_per_macro = np.array([9, 4, 4])
macro_nutrients * calories_per_macro
arr4 = np.arange(0, 10)
arr4
arr4[0:4] = 10
arr4
arr5 = np.arange(0, 10)
arr6 = np.arange(10, 20)
arr5
arr6
arr5 + arr6
arr6 - arr5
arr5 * arr6
arr5 + 10 # broadcasting
arr6 - 10
arr5 ** 2
# NumPy generates a warning if we attempt to divide by zero.
arr5 / arr5
1 / arr5
arr7 = np.array([2, 6, 7, 10, 45, 200])
arr7
# Computes the square root of each element of the array
np.sqrt(arr7)
np.exp(arr7)
np.log(arr7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Romániai lakosság letöltése INSSE-ról
Step2: Wikipédia táblázatok letöltése
Step3: Ha html5lib not found hibaüzenetet kapunk, akkor egy konzol (Command Prompt, Parancssor) megnyitása után a conda install html5lib vagy pip install html5lib parancsokkal telepítjük. Ezután újra kell indítani a Jupyter-t.
Step4: A táblázatlistából nincsen szükség csak a 5. (tehát 4-es indexű, 0-tól kezdődik) táblázatra. Ezt mentsük el az gf változóba, aminek a típusa egy pandas dataframe lesz.
Step5: Csak az 1-től 4-ig terjedő sorok van szükség, a többit eldobjuk.
Step6: Transzponáljuk a táblázatot
Step7: D3plus-ba betölthető json formátumban elmentjük a táblázat tartalmát.
Step8: Az eredmény
Step9: Elmentjük a fájlt
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import html5lib
import matplotlib.pyplot as plt
%matplotlib inline
csv_path='exportPivot_POP105A.csv' #SAJAT HELY CSV FILE
df=pd.read_csv(csv_path)
df.head()
wiki_path="http://hu.wikipedia.org/wiki/Csíkszereda"
df2=pd.read_html(wiki_path)
df2[4]
gf=df2[4]
gf
ef=gf[1:4]
ef.columns=ef.loc[ef.index[0]]
ef=ef.drop(1)
ef=ef.set_index(ef.columns[0])
ef=ef.drop(u'Év',axis=1)
ef
rf=ef.T
rf.head(2)
#uj=[[] for i in range(len(rf.columns))]
# Build a list of records for d3plus: one dict per row of `rf`, with the
# two value columns parsed from their string form into ints.
d3=[]
ujnevek=['ujmax','ujmin']
for k in range(len(rf.index)):
    i=rf.index[k]
    seged={}
    for j in range(len(rf.loc[i])):
        # Assumes cell strings carry a leading sign marker when they
        # contain a comma and a trailing unit character otherwise —
        # inferred from the slicing below; TODO confirm against the
        # actual Wikipedia table contents.
        uc=unicode(rf.loc[i][j])
        if ',' in uc:
            ertek=-int(uc[1:-2])
        else:
            ertek=int(uc[0:-1])
        #uj[j].append(ertek)
        seged[ujnevek[j]]=ertek
    # honap = month label, honap2 = 1-based month number
    seged["honap"]=rf.index[k]
    seged["honap2"]=k+1
    d3.append(seged)
d3
import json
# NOTE(review): `file()` is the Python 2 builtin; under Python 3 this
# raises NameError — use open() there. The handle is also never closed.
file('uj.json','w').write(json.dumps(d3))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Notice that the edge list is empty, since we haven't added any edges yet. Also, because the number of edges in a graph can become very large, there is an iterator method for returning edges
Step2: If we had a script that needs to add a single edge from a tuple, we would use * preceeding the tuple or it's assigned variable in the add_edge method
Step3: We can also add nodes and edges using objects called nbunch and ebunch. These objects are any iterables or generators of nodes or edge tuples. We will do this below using a couple different methods.
Step4: We can also remove nodes or edges using similar methods, just replacing 'add' with 'remove'
Step5: Notice that removing nodes automatically removed the related edges for us. One last basic inspection method is to get a list of neighbors (adjacent nodes) for a specific node in a graph.
Step6: While we have been using numbers to represent nodes, we can use any hashable object as a node. For example, this means that lists, sets and arrays can't be nodes, but frozensets can
Step7: Edge properties
Step8: Subscript notation for accessing edges
Step9: We can also modify specific edge attributes
Step10: And we can add other attributes
Step11: 1. Create a complete graph with 7 nodes and verify that it is complete by looking at the edges. Do this manually and using a built-in method.
Step12: 2. Create a function that will draw a given graph that has a layout type parameter and labels the nodes. Now draw the graph created in the last problem using circular layout.
Step13: 3. Create a graph with 10 nodes and 3 components. Draw this graph.
Step14: 4. Create a simple connected digraph with 4 nodes and a diameter of 2.
Step15: 5. Create another 4 node digraph with weighted edges. Draw this graph with node and edge weight labels.
Step16: 6. Create the adjacnency matrix from the edge data in edges_1.pkl
Step17: 7. Using networkx built-in functions, create the distance matrix for the same graph from the previous problem
Step18: 8. Identify and remove a cutpoint from this graph and re-draw it
Step19: 9. Use edges_2 to create a graph. List any subgraphs that are maximal cliques
Step20: 10. Determine the Degree, Closeness, and Betweenness measures of centrality for this network
Step21: 11. Based on the measures above, which actors have the greatest control over the flow of information? Why do some actors have betweenness measures of zero?
Step22: 13. Create a directed graph from edges_3.pkl and do the following
|
<ASSISTANT_TASK:>
Python Code:
# isntantiate a graph object
G = nx.Graph()
# add a single node
G.add_node(1)
# add multiple nodes from a list
G.add_nodes_from([2,3,5])
# return lists of nodes and edges in the graph
G.nodes(), G.edges()
# add a single edge between 3 and 5
G.add_edge(3,5)
# add multiple edges using list of tuples
edge_list = [(1,2),(2,3),(2,5)]
G.add_edges_from(edge_list)
G.edges()
# the asterisk indicates that the values should be extracted
G.add_edge(*(1,3))
G.edges()
# generate a graph of linearly connected nodes
# this is a graph of a single path with 5 nodes and 4 edges
H = nx.path_graph(5)
# a look at the nodes and edges produced
H.nodes(), H.edges()
# Create a graph using nbunch and ebunch from the graph H
G = nx.Graph()
G.add_nodes_from(H)
# we have to specify edges
G.add_edges_from(H.edges())
G.nodes(), G.edges()
# now add edges to a graph using an iterator instead of an iterable list
# this is another example of ebunch, and node iterators work too
G = nx.Graph()
G.add_nodes_from([1,2,3])
# create edge generator connecting all possible node pairs
from itertools import combinations
edge_generator = combinations([1,2,3], 2)
# to show this is a generator and not a list
print('not a list: ', edge_generator)
# now lets add the edges using the iterator
G.add_edges_from(edge_generator)
G.edges()
H.nodes(), H.edges()
H.remove_nodes_from([0,4])
H.nodes(), H.edges()
H = nx.path_graph(7)
# get the neighbors for node 5
H.neighbors(5)
G = nx.Graph()
# G.add_node([0,1]) <-- raises error
# G.add_node({0,1}) <-- raises error
G.add_node(frozenset([0,1])) # this works
G.nodes()
G = nx.Graph()
G.add_nodes_from([1,2,3])
G.add_weighted_edges_from([(1,2,3.14), (2,3,6.5)])
# calling edges() alone will not return weights
print(G.edges(), '\n')
# we need to use the data parameter to get triples
print(G.edges(data='weight'), '\n')
# we can also get data for individual edges
print(G.get_edge_data(1,2))
# get edge data for node 2
print(G[2], '\n')
# subscript further to get only the weight for edge between 2 and 3
print(G[2][3])
G[2][3]['weight'] = 17
G[2][3]
G[2][3]['attr'] = 'value'
G[2][3]
# manually
from itertools import combinations
complete_edges = combinations(range(7), 2)
G_complete = nx.Graph(complete_edges)
G_complete.edges()
# built-in method
G_complete = nx.complete_graph(7)
G_complete.edges()
# function to draw and label nodes in a graph
def draw(G, layout):
    """Draw graph G with the given layout function and label every node.

    `layout` is a networkx layout function (e.g. nx.circular_layout);
    deprecation and user warnings from matplotlib are silenced.
    """
    import warnings
    import matplotlib.cbook
    warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
    warnings.filterwarnings("ignore",category=UserWarning)
    # Fix: compute the layout once. The original called layout(G) twice,
    # which misplaces labels for stochastic layouts (e.g. spring_layout)
    # and needlessly runs a potentially expensive layout a second time.
    pos = layout(G)
    nx.draw(G, pos=pos)
    nx.draw_networkx_labels(G, pos=pos);
draw(G_complete, nx.circular_layout)
from networkx.drawing.nx_agraph import graphviz_layout
edges_1 = [(0,1), (0,2)]
edges_2 = [(3,4), (3,5), (4,5), (4,6)]
edges_3 = [(7,8), (7,9)]
G = nx.Graph(edges_1 + edges_2 + edges_3)
draw(G, graphviz_layout)
# create in and out edge lists
out_edges = [(0,1), (0,2), (1,2), (2,3)]
# create the empty digraph
G = nx.DiGraph(out_edges)
draw(G, graphviz_layout)
out_edges = [(0,1,0.5), (0,2,0.5), (1,2,1), (2,3,0.7)]
G = nx.DiGraph()
G.add_weighted_edges_from(out_edges)
draw(G, graphviz_layout)
labels = nx.get_edge_attributes(G, 'weight')
nx.draw_networkx_edge_labels(G, pos=graphviz_layout(G), edge_labels=labels);
import pickle
with open('../edges_1.pkl', 'rb') as f:
edges = pickle.load(f)
G = nx.Graph(edges)
# lets see what it looks like
draw(G, graphviz_layout)
adj_matrix = nx.to_numpy_matrix(G)
DF(adj_matrix)
geodesics = nx.all_pairs_shortest_path_length(G)
# this gave us a dict of shortest path lengths between all connected pairs
geodesics
# we can easily convert this to a matrix using pandas
DF(geodesics)
# 4 is a cutpoint
G.remove_node(4)
draw(G, graphviz_layout)
# G = nx.moebius_kantor_graph()
G = nx.dorogovtsev_goltsev_mendes_graph(3)
with open('../edges_2.pkl', 'rb') as f:
edges = pickle.load(f)
G = nx.Graph(edges)
# draw(G, graphviz_layout)
draw(G, nx.circular_layout)
list(nx.find_cliques(G))
degree = nx.degree_centrality(G)
closeness = nx.closeness_centrality(G)
betweenness = nx.betweenness_centrality(G)
Series(degree)
Series(closeness)
Series(betweenness)
H = G.copy()
H.remove_node(0)
H.add_edge(10,14)
H.remove_edge(1,2)
# draw(H, graphviz_layout)
draw(H, nx.circular_layout)
# eccentricity of node 1
nx.eccentricity(H, 1)
# find cliques containing node 1
nx.cliques_containing_node(H, 1)
# density
nx.density(H)
# remove node 1
H.remove_node(1)
nx.density(H)
with open('edges_3.pkl', 'rb') as f:
edges = pickle.load(f)
G = nx.DiGraph(edges)
draw(G, graphviz_layout)
# adjacency matrix
adj_matrix = nx.to_numpy_matrix(G)
DF(adj_matrix)
# indegree and outdegree
indegree = adj_matrix.sum(axis=0) / (len(adj_matrix)-1)
outdegree = adj_matrix.sum(axis=1) / (len(adj_matrix)-1)
in_method = nx.in_degree_centrality(G)
out_method = nx.out_degree_centrality(G)
# indegree comparison
(Series(np.array(indegree).flatten()) == Series(in_method)).all()
# outdegree comparison
(Series(np.array(outdegree).flatten()) == Series(out_method)).all()
Series(in_method).sort_values(ascending=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: Linear models in keras
Step3: We can use keras to create a simple linear model (Dense() - with no activation - in Keras) and optimize it using SGD to minimize mean squared error (mse)
Step4: (See the Optim Tutorial notebook and associated Excel spreadsheet to learn all about SGD and related optimization algorithms.)
Step5: And, of course, we can also take a look at the weights - after fitting, we should see that they are close to the weights we used to calculate y (2.0, 3.0, and 1.0).
Step6: Train linear model on predictions
Step7: We will process as many images at a time as our graphics card allows. This is a case of trial and error to find the max batch size - the largest size that doesn't give an out of memory error.
Step8: We need to start with our VGG 16 model, since we'll be using its predictions and features.
Step9: Our overall approach here will be
Step10: Loading and resizing the images every time we want to use them isn't necessary - instead we should save the processed arrays. By far the fastest way to save and load numpy arrays is using bcolz. This also compresses the arrays, so we save disk space. Here are the functions we'll use to save and load using bcolz.
Step11: We have provided a simple function that joins the arrays from all the batches - let's use this to grab the training and validation data
Step12: We can load our training and validation data later without recalculating them
Step13: Keras returns classes as a single column, so we convert to one hot encoding
Step14: ...and their 1,000 imagenet probabilties from VGG16--these will be the features for our linear model
Step15: We can load our training and validation features later without recalculating them
Step16: Now we can define our linear model, just like we did earlier
Step17: We're ready to fit the model!
Step18: Viewing model prediction examples
Step19: Get the filenames for the validation set, so we can view images
Step20: Helper function to plot images by index in the validation set
Step21: Perhaps the most common way to analyze the result of a classification model is to use a confusion matrix. Scikit-learn has a convenient function we can use for this purpose
Step22: We can just print out the confusion matrix, or we can show a graphical view (which is mainly useful for dependents with a larger number of categories).
Step23: About activation functions
Step24: Careful! Now that we've modified the definition of model, be careful not to rerun any code in the previous sections, without first recreating the model from scratch! (Yes, I made that mistake myself, which is why I'm warning you about it now...)
Step25: ...and compile our updated model, and set up our batches to use the preprocessed images (note that now we will also shuffle the training batches, to add more randomness when using multiple epochs)
Step26: We'll define a simple function for fitting models, just to save a little typing...
Step27: ...and now we can use it to train the last layer of our model!
Step28: Before moving on, go back and look at how little code we had to write in this section to finetune the model. Because this is such an important and common operation, keras is set up to make it as easy as possible. We didn't even have to use any external helper functions in this section.
Step29: We can look at the earlier prediction examples visualizations by redefining probs and preds and re-using our earlier code.
Step30: Retraining more layers
Step31: The key insight is that the stacking of linear functions and non-linear activations we learnt about in the last section is simply defining a function of functions (of functions, of functions...). Each layer is taking the output of the previous layer's function, and using it as input into its function. Therefore, we can calculate the derivative at any layer by simply multiplying the gradients of that layer and all of its following layers together! This use of the chain rule to allow us to rapidly calculate the derivatives of our model at any layer is referred to as back propagation.
Step32: Since we haven't changed our architecture, there's no need to re-compile the model - instead, we just set the learning rate. Since we're training more layers, and since we've already optimized the last layer, we should use a lower learning rate than previously.
Step33: This is an extraordinarily powerful 5 lines of code. We have fine-tuned all of our dense layers to be optimized for our specific data set. This kind of technique has only become accessible in the last year or two - and we can already do it in just 5 lines of python!
Step34: There's generally little room for improvement in training the convolutional layers, if you're using the model on natural images (as we are). However, there's no harm trying a few of the later conv layers, since it may give a slight improvement, and can't hurt (and we can always load the previous weights if the accuracy decreases).
Step35: You can always load the weights later and use the model to do whatever you need
|
<ASSISTANT_TASK:>
Python Code:
# Rather than importing everything manually, we'll make things easy
# and load them all in utils.py, and just import them from there.
%matplotlib inline
import utils; reload(utils)
from utils import *
%matplotlib inline
from __future__ import division,print_function
import os, json
from glob import glob
import numpy as np
import scipy
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import confusion_matrix
np.set_printoptions(precision=4, linewidth=100)
from matplotlib import pyplot as plt
import utils; reload(utils)
from utils import plots, get_batches, plot_confusion_matrix, get_data
from numpy.random import random, permutation
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.models import Sequential
from keras.layers import Input
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, RMSprop
from keras.preprocessing import image
x = random((30,2))
y = np.dot(x, [2., 3.]) + 1.
x[:5]
y[:5]
lm = Sequential([ Dense(1, input_shape=(2,)) ])
lm.compile(optimizer=SGD(lr=0.1), loss='mse')
lm.evaluate(x, y, verbose=0)
lm.fit(x, y, nb_epoch=5, batch_size=1)
lm.evaluate(x, y, verbose=0)
lm.get_weights()
path = "data/dogscats/sample/"
# path = "data/dogscats/"
model_path = path + 'models/'
if not os.path.exists(model_path): os.mkdir(model_path)
# batch_size=100
batch_size=4
from vgg16 import Vgg16
vgg = Vgg16()
model = vgg.model
# Use batch size of 1 since we're just doing preprocessing on the CPU
val_batches = get_batches(path+'valid', shuffle=False, batch_size=1)
batches = get_batches(path+'train', shuffle=False, batch_size=1)
import bcolz
def save_array(fname, arr):
    """Persist `arr` on disk as a compressed bcolz carray rooted at `fname`."""
    carr = bcolz.carray(arr, rootdir=fname, mode='w')
    carr.flush()
def load_array(fname):
    """Read a bcolz carray saved by save_array() fully back into memory."""
    on_disk = bcolz.open(fname)
    return on_disk[:]
val_data = get_data(path+'valid')
trn_data = get_data(path+'train')
trn_data.shape
save_array(model_path+'train_data.bc', trn_data)
save_array(model_path+'valid_data.bc', val_data)
trn_data = load_array(model_path+'train_data.bc')
val_data = load_array(model_path+'valid_data.bc')
val_data.shape
def onehot(x):
    """One-hot encode a 1-D array of integer class labels.

    Returns a float (n_samples, n_classes) matrix with a single 1.0 per row.
    Columns are ordered by the sorted unique label values -- the same ordering
    sklearn's OneHotEncoder produced -- but computed directly in numpy,
    avoiding the sparse-matrix/`todense()` detour and the deprecated
    OneHotEncoder-on-integers usage.
    """
    classes = np.unique(x)
    return (x.reshape(-1, 1) == classes).astype(np.float64)
val_classes = val_batches.classes
trn_classes = batches.classes
val_labels = onehot(val_classes)
trn_labels = onehot(trn_classes)
trn_labels.shape
trn_classes[:4]
trn_labels[:4]
trn_features = model.predict(trn_data, batch_size=batch_size)
val_features = model.predict(val_data, batch_size=batch_size)
trn_features.shape
save_array(model_path+'train_lastlayer_features.bc', trn_features)
save_array(model_path+'valid_lastlayer_features.bc', val_features)
trn_features = load_array(model_path+'train_lastlayer_features.bc')
val_features = load_array(model_path+'valid_lastlayer_features.bc')
# 1000 inputs, since that's the saved features, and 2 outputs, for dog and cat
lm = Sequential([ Dense(2, activation='softmax', input_shape=(1000,)) ])
lm.compile(optimizer=RMSprop(lr=0.1), loss='categorical_crossentropy', metrics=['accuracy'])
batch_size=64
batch_size=4
lm.fit(trn_features, trn_labels, nb_epoch=3, batch_size=batch_size,
validation_data=(val_features, val_labels))
lm.summary()
# We want both the classes...
preds = lm.predict_classes(val_features, batch_size=batch_size)
# ...and the probabilities of being a cat
probs = lm.predict_proba(val_features, batch_size=batch_size)[:,0]
probs[:8]
preds[:8]
filenames = val_batches.filenames
# Number of images to view for each visualization task
n_view = 4
def plots_idx(idx, titles=None):
    """Display the validation images at indices `idx` in a single row.

    `titles` (optional) annotates each image, e.g. with its predicted
    cat-probability. Relies on the module-level `path` and `filenames`
    (the latter taken from `val_batches`).
    """
    plots([image.load_img(path + 'valid/' + filenames[i]) for i in idx], titles=titles)
#1. A few correct labels at random
correct = np.where(preds==val_labels[:,1])[0]
idx = permutation(correct)[:n_view]
plots_idx(idx, probs[idx])
#2. A few incorrect labels at random
incorrect = np.where(preds!=val_labels[:,1])[0]
idx = permutation(incorrect)[:n_view]
plots_idx(idx, probs[idx])
#3. The images we most confident were cats, and are actually cats
correct_cats = np.where((preds==0) & (preds==val_labels[:,1]))[0]
most_correct_cats = np.argsort(probs[correct_cats])[::-1][:n_view]
plots_idx(correct_cats[most_correct_cats], probs[correct_cats][most_correct_cats])
# as above, but dogs
correct_dogs = np.where((preds==1) & (preds==val_labels[:,1]))[0]
most_correct_dogs = np.argsort(probs[correct_dogs])[:n_view]
plots_idx(correct_dogs[most_correct_dogs], 1-probs[correct_dogs][most_correct_dogs])
#3. The images we were most confident were cats, but are actually dogs
incorrect_cats = np.where((preds==0) & (preds!=val_labels[:,1]))[0]
most_incorrect_cats = np.argsort(probs[incorrect_cats])[::-1][:n_view]
if len(most_incorrect_cats):
plots_idx(incorrect_cats[most_incorrect_cats], probs[incorrect_cats][most_incorrect_cats])
else:
print('No incorrect cats!')
#3. The images we were most confident were dogs, but are actually cats
incorrect_dogs = np.where((preds==1) & (preds!=val_labels[:,1]))[0]
most_incorrect_dogs = np.argsort(probs[incorrect_dogs])[:n_view]
if len(most_incorrect_dogs):
plots_idx(incorrect_dogs[most_incorrect_dogs], 1-probs[incorrect_dogs][most_incorrect_dogs])
else:
print('No incorrect dogs!')
#5. The most uncertain labels (ie those with probability closest to 0.5).
most_uncertain = np.argsort(np.abs(probs-0.5))
plots_idx(most_uncertain[:n_view], probs[most_uncertain])
cm = confusion_matrix(val_classes, preds)
plot_confusion_matrix(cm, val_batches.class_indices)
vgg.model.summary()
model.pop()
for layer in model.layers: layer.trainable=False
model.add(Dense(2, activation='softmax'))
??vgg.finetune
gen=image.ImageDataGenerator()
batches = gen.flow(trn_data, trn_labels, batch_size=batch_size, shuffle=True)
val_batches = gen.flow(val_data, val_labels, batch_size=batch_size, shuffle=False)
def fit_model(model, batches, val_batches, nb_epoch=1):
    """Train `model` for `nb_epoch` epochs on the generator `batches`,
    evaluating on `val_batches` after each epoch (Keras 1.x generator API:
    `samples_per_epoch`/`nb_val_samples` count individual samples).
    """
    model.fit_generator(batches, samples_per_epoch=batches.n, nb_epoch=nb_epoch,
                        validation_data=val_batches, nb_val_samples=val_batches.n)
opt = RMSprop(lr=0.1)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
fit_model(model, batches, val_batches, nb_epoch=2)
model.save_weights(model_path+'finetune1.h5')
model.load_weights(model_path+'finetune1.h5')
model.evaluate(val_data, val_labels)
preds = model.predict_classes(val_data, batch_size=batch_size)
probs = model.predict_proba(val_data, batch_size=batch_size)[:,0]
probs[:8]
cm = confusion_matrix(val_classes, preds)
plot_confusion_matrix(cm, {'cat':0, 'dog':1})
# sympy let's us do symbolic differentiation (and much more!) in python
import sympy as sp
# we have to define our variables
x = sp.var('x')
# then we can request the derivative or any expression of that variable
pow(2*x,2).diff()
layers = model.layers
# Get the index of the first dense layer...
first_dense_idx = [index for index,layer in enumerate(layers) if type(layer) is Dense][0]
# ...and set this and all subsequent layers to trainable
for layer in layers[first_dense_idx:]: layer.trainable=True
K.set_value(opt.lr, 0.01)
fit_model(model, batches, val_batches, 3)
model.save_weights(model_path+'finetune2.h5')
for layer in layers[12:]: layer.trainable=True
K.set_value(opt.lr, 0.001)
fit_model(model, batches, val_batches, 4)
model.save_weights(model_path+'finetune3.h5')
model.load_weights(model_path+'finetune2.h5')
model.evaluate_generator(get_batches(path+'valid', gen, False, batch_size*2), val_batches.n)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Start from files on disk
Step2: Visualize a FES and the molecular structures behind it
Step3: Visualize trajectories and molecular structures (and the FES, optionally)
Step4: Repeat from memory
Step5: The mpx_wdg_box
|
<ASSISTANT_TASK:>
Python Code:
import molpx
%matplotlib ipympl
top = molpx._molpxdir(join='notebooks/data/ala2.pdb')
MD_trajfiles = [molpx._molpxdir(join='notebooks/data/ala2.mini.xtc')] #short trajectory
rama_files = [molpx._molpxdir(join='notebooks/data/ala2.mini.phi.psi.dat')]
mpx_wdg_box = molpx.visualize.FES(MD_trajfiles,
top,
rama_files,
nbins=50,
proj_labels=['$\phi$',
'$\psi$'],
atom_selection="symbol != H",
#proj_idxs=[1],
#n_overlays=5,
#sticky=True,
#color_list='random'
)
mpx_wdg_box
from molpx import visualize, _linkutils
from imp import reload
reload(visualize)
reload(_linkutils)
from matplotlib import pyplot as plt
plt.close('all')
mpx_wdg_box = molpx.visualize.traj(MD_trajfiles,
top,
rama_files,
plot_FES = True,
proj_labels=['$\phi$', '$\psi$']
)
mpx_wdg_box
import numpy as np
import mdtraj as md
MD_trajs = [md.load(fname, top=top) for fname in MD_trajfiles]
phi_psi = [np.loadtxt(fname) for fname in rama_files]
mpx_wdg_box = molpx.visualize.FES(MD_trajs,
top,
phi_psi,
nbins=50,
proj_labels=['$\phi$',
'$\psi$'],
atom_selection="symbol != H",
#proj_idxs=[1],
#n_overlays=5,
#sticky=True,
#color_list='random'
)
mpx_wdg_box
for attr in dir(mpx_wdg_box):
if attr.startswith('linked_'):
print(attr)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Above is the output I'm getting- still need to discuss interpolation and also adding in the parameter for number of timeseries to find
|
<ASSISTANT_TASK:>
Python Code:
import MessageFormatting
import importlib
importlib.reload(MessageFormatting)
from MessageFormatting import *
from timeseries.ArrayTimeSeries import ArrayTimeSeries as ts
import numpy as np
from scipy.stats import norm
t = np.arange(0.0, 1.0, 0.01)
v = norm.pdf(t, 100, 100) + 1000*np.random.randn(100)
ts_test = ts(t, v)
d2 = {'op':'storeTS','id':1000,'ts':[[1,2,3], [-1,3,-10]],'courtesy':'please'}
#d2 = {'op':'TSfromID','id':1000,'courtesy':'please'}
#d2 = {'op':'simsearch_id','id':12,'n_closest':2,'courtesy':'please'}
#d2 = {'op':'simsearch_ts','ts':[list(ts_test.times()), list(ts_test.values())],'courtesy':'please'}
s2 = serialize(json.dumps(d2))
s2
import sys
from socket import socket, AF_INET, SOCK_STREAM
s = socket(AF_INET, SOCK_STREAM)
s.connect(('localhost', 20000))
s.send(s2)
msg = s.recv(8192)
print(msg)
ds = Deserializer()
ds.append(msg)
ds.ready()
response = ds.deserialize()
#print(response)
if 'ts' in response:
a = response['ts']
elif 'id' in response:
a = response['id']
print(response)
print(a)
print(a)
a = b = 0
response['ts']
def dic_fun(**kwargs):
    """Collect the keyword arguments into a dict and print it."""
    collected = dict(kwargs)
    print(collected)
dic_fun(a=12,b=17)
import MessageFormatting
import importlib
importlib.reload(MessageFormatting)
from MessageFormatting import *
d2 = {'op':'simsearch_ts','ts':[[1,2,3],[4,5,6]],'courtesy':'please'}
d2 = {'op':'simsearch_id','id':12,'courtesy':'please','n_closest':12}
d2 = {'op':'TSfromID','id':12,'courtesy':'please'}
c = TSDBOp.from_json(d2)
c
ds = Deserializer()
ds.append(msg)
ds.deserialize()
msg
json.dumps('success!')
#json.loads(TSDBOp.to_json('success!'))
from Similarity.find_most_similar import find_most_similiar
sys.path
os.getcwd()
os.path.dirname(os.path.abspath(__file__))
from StorageManager import FileStorageManager
sm = File
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set hyperparameters
Step2: Define class for Deep-Q-Learning agent
Step3: Set other parameters (some of these should be moved to top of file)
|
<ASSISTANT_TASK:>
Python Code:
import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import os # for creating directories
output_dir = 'model_output/cartpole/'
n_episodes = 1001 # n games we want agent to play (default 1001)
class DQNAgent:
    """Deep Q-Network agent for discrete-action environments (here CartPole).

    Maintains a bounded replay memory of (state, action, reward, next_state,
    done) transitions and a small fully-connected Keras network approximating
    the state -> Q-value mapping. Acts epsilon-greedily and learns by
    replaying random minibatches of remembered transitions.
    """
    def __init__(self, state_size, action_size):
        self.state_size = state_size    # dimensionality of the observation vector
        self.action_size = action_size  # number of discrete actions
        self.memory = deque(maxlen=2000) # double-ended queue; acts like list, but elements can be added/removed from either end
        self.gamma = 0.95 # decay or discount rate: enables agent to take into account future actions in addition to the immediate ones, but discounted at this rate
        self.epsilon = 1.0 # exploration rate: how much to act randomly; more initially than later due to decay
        self.epsilon_min = 0.01 # minimum amount of random exploration permitted
        self.epsilon_decay = 0.995 # decrease number of random explorations as the agent's performance improves
        self.learning_rate = 0.001 # rate at which NN adjusts model's parameters via SGD to reduce cost
        self.model = self._build_model()
    def _build_model(self):
        """Build and compile the Q-network: two 24-unit relu hidden layers and
        a linear output giving one Q-value per action, trained with MSE/Adam."""
        # Neural Net for Deep-Q learning Model
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse',
                      optimizer=Adam(lr=self.learning_rate))
        return model
    def remember(self, state, action, reward, next_state, done):
        """Store one transition for later replay training."""
        self.memory.append((state, action, reward, next_state, done)) # list of previous experiences, enabling re-training later
    def act(self, state):
        """Choose an action epsilon-greedily: random with probability
        `epsilon`, otherwise the argmax of the network's predicted Q-values."""
        if np.random.rand() <= self.epsilon: # if acting randomly, take random action
            return random.randrange(self.action_size)
        act_values = self.model.predict(state) # if not acting randomly, predict reward value based on current state
        return np.argmax(act_values[0]) # pick the action that will give the highest reward (i.e., go left or right?)
    def replay(self, batch_size): # method that trains NN with experiences sampled from memory
        """Fit the network toward the Bellman target on a random minibatch of
        stored transitions, then decay epsilon toward `epsilon_min`."""
        minibatch = random.sample(self.memory, batch_size) # sample a minibatch from memory
        for state, action, reward, next_state, done in minibatch: # extract data for each minibatch sample
            target = reward # if done (boolean whether game ended or not, i.e., whether final state or not), then target = reward
            if not done: # if not done, then predict future discounted reward
                target = (reward + self.gamma * # (target) = reward + (discount rate gamma) *
                          np.amax(self.model.predict(next_state)[0])) # (maximum target Q based on future action a')
            target_f = self.model.predict(state) # approximately map current state to future discounted reward
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0) # single epoch of training with x=state, y=target_f; fit decreases loss btwn target_f and y_hat
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
    def load(self, name):
        """Load network weights from the HDF5 file `name`."""
        self.model.load_weights(name)
    def save(self, name):
        """Save network weights to the HDF5 file `name`."""
        self.model.save_weights(name)
env = gym.make('CartPole-v1') # initialise environment
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
agent = DQNAgent(state_size, action_size) # initialise agent
# agent.load("./save/cartpole-dqn.h5") # JK FIX
done = False
batch_size = 32
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for e in range(n_episodes): # iterate over new episodes of the game
state = env.reset() # reset state at start of each new episode of the game
state = np.reshape(state, [1, state_size])
for time in range(5000): # time represents a frame of the game; goal is to keep pole upright as long as possible up to range, e.g., 500 or 5000 timesteps
env.render()
action = agent.act(state) # action is either 0 or 1 (move cart left or right); decide on one or other here
next_state, reward, done, _ = env.step(action) # agent interacts with env, gets feedback; 4 state data points, e.g., pole angle, cart position
reward = reward if not done else -10 # reward +1 for each additional frame with pole upright
next_state = np.reshape(next_state, [1, state_size])
agent.remember(state, action, reward, next_state, done) # remember the previous timestep's state, actions, reward, etc.
state = next_state # set "current state" for upcoming iteration to the current next state
if done: # episode ends if agent drops pole or we reach timestep 5000
print("episode: {}/{}, score: {}, e: {:.2}" # print the episode's score
.format(e, n_episodes, time, agent.epsilon))
break # exit loop
if len(agent.memory) > batch_size:
agent.replay(batch_size) # train the agent by replaying the experiences of the episode
if e % 50 == 0:
agent.save(output_dir + "weights_" + '{:04d}'.format(e) + ".hdf5")
# env.render(close=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I want to count the number of rentals per vehicle ID in reservations.csv, appending these values as a column in vehicles.csv, in order to compare vehicle properties to reservation numbers directly. I also expect that a key factor in customer decisions may be the price difference between the actual and recommended prices, so I create a new column for this parameter as well. Finally, I merge the two dataframes to facilitate easy histogram plotting and analysis using the reservations data as a basis.
Step2: Finding the most important factors driving the total number of reservations
Step3: The plot above shows every measured parameter, along with the newly added price-difference and number-of-reservations parameters, plotted against one another. I used this to try to quickly determine, visually, what parameters might drive the number of reservations. This is probably at the limit, in terms of number of parameters, of what I would just throw onto a scatter plot to inspect. A few things pop out
Step4: In the above figure, I show the recommended price versus the actual price in a hex density plot, with the color intensity representing the number of reservations. The orange line represents a one-to-one correlation between the two parameters. One can see immediately that there is a high density of reservations corresponding to the one-to-one line, where the actual price very nearly matches the recommended price. I also feel confident, now, that the recommended price does not need to be a free parameter in the future machine learning analysis, instead substituting the price difference.
Step5: In the above code, I explore the data set using a Random Forest algorithm. It is a relatively quick and exceptionally versatile way to examine labelled data and derive relationships. In this case, I am using it to "score" the various parameters by how much they contribute to the number of reservations.
Step6: In the above figures, I examine exactly how the price factors in to the number of reservations. These histograms show the number of reservations on the y-axis as a function of either the actual price or the difference between the actual price and the recommended price.
Step7: The final important parameter is the length of the description. The above plot shows the frequency of reservations as a function of the description character length. I can quickly conclude that more reservations are made for cars with descriptions less than 50 characters. After this point, the length of the description does not play a major role in one's decision whether or not to reserve a vehicle.
Step8: In the above plots, I show the normalized frequency of reservation for the three types of reservations. On the x-axis, a 0 represents the absence of the technology package, and a 1 represents a vehicle having the technology. It is visually obvious that a proportionately larger number of hourly reservations are made with vehicles having the technology package. We can statistically support this claim.
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
plt.style.use('ggplot')
plt.rc('figure',figsize=(13,13))
# Make things look nicer
LABEL_SIZE = 18
LEGEND_SIZE = 12
plt.rc('lines', linewidth=1.75)
# Read in data to pandas dataframes
VEHICLE_DATA = pd.read_csv("vehicles.csv")
RESERVATION_DATA = pd.read_csv("reservations.csv")
# Count reservations per vehicle and append as a column of VEHICLE_DATA.
# value_counts() tallies reservations per vehicle_id in one pass, and map()
# aligns the tallies to the vehicle table; vehicles that were never reserved
# get NaN from map(), which fillna(0) turns back into an explicit zero count.
# (Replaces a deprecated as_matrix()-based O(n_vehicles * n_reservations)
# loop that also shadowed the builtin `id`.)
reservation_counts = RESERVATION_DATA['vehicle_id'].value_counts()
VEHICLE_DATA['num_reservations'] = (
    VEHICLE_DATA['vehicle_id'].map(reservation_counts).fillna(0).astype(int)
)
# Add column with difference between the recommended price and the actual price
VEHICLE_DATA['diff_price'] = VEHICLE_DATA['recommended_price'] - VEHICLE_DATA['actual_price']
# Add a column that 'bucketizes' the number of reservations (low, med, high) categories
VEHICLE_DATA['categorical_reservations'] = pd.cut(VEHICLE_DATA['num_reservations'], 3, labels=["low","medium","high"])
# Merge databases to get vehicle features in the RESERVATION_DATA dataframe
MERGED_DATA = pd.merge(VEHICLE_DATA,RESERVATION_DATA)
# Define columns to plot initially as an exploration step
PLOT_COLUMNS = ['technology', 'num_images', 'street_parked', 'description','actual_price',
'recommended_price','diff_price','num_reservations']
from pandas.tools.plotting import scatter_matrix
scatter_matrix(VEHICLE_DATA[PLOT_COLUMNS], diagonal='kde')
plt.show()
plt.rcParams.update({'font.size': LABEL_SIZE})
plt.rc('figure',figsize=(10,8))
VEHICLE_DATA.plot.hexbin(x='actual_price',y='recommended_price',C='num_reservations',reduce_C_function=np.max,
gridsize=25,figsize=(10,8))
plt.ylabel("Recommended Price")
plt.xlabel("Actual Price")
plt.plot([34,90],[34,90],color='orange',linewidth=3)
plt.show()
# Define feature columns to explore with machine learning
FEATURE_COLUMNS = ['technology', 'num_images', 'street_parked', 'description','diff_price','actual_price']
# Random forest regressor for continuous num_reservations
TARGET_COLUMN = ['num_reservations']
rf = RandomForestRegressor()
rf.fit(VEHICLE_DATA[FEATURE_COLUMNS],VEHICLE_DATA[TARGET_COLUMN].values.ravel())
print "====================================================================================="
print "Features Sorted by Score for Regressor:\n"
print sorted(zip(map(lambda x: round(x,4), rf.feature_importances_),FEATURE_COLUMNS),reverse=True)
# Random forest classifier for bucketized num_reservations
TARGET_COLUMN = ['categorical_reservations']
rf = RandomForestClassifier()
rf.fit(VEHICLE_DATA[FEATURE_COLUMNS],VEHICLE_DATA[TARGET_COLUMN].values.ravel())
print "\nFeatures Sorted by Score for Classifier:\n"
print sorted(zip(map(lambda x: round(x,4), rf.feature_importances_),FEATURE_COLUMNS),reverse=True)
print "====================================================================================="
fig, axes = plt.subplots(nrows=1, ncols=2, sharey=True)
fig.set_figheight(6)
fig.set_figwidth(16)
MERGED_DATA[['actual_price']].plot.hist(bins=15,ax=axes[0])
axes[0].set_xlabel("Actual Price")
MERGED_DATA[['diff_price']].plot.hist(bins=15,ax=axes[1])
plt.xlabel("Price Difference")
plt.show()
MERGED_DATA[['description']].plot.hist(bins=15,figsize=(8,6))
plt.show()
plt.rc('figure',figsize=(8,6))
plt.figure(1)
MERGED_DATA.loc[MERGED_DATA['reservation_type']==1]['technology'].plot.hist(alpha=0.5,title="Hourly",
normed=True)
plt.xlabel("With or Without Technology")
plt.figure(2)
MERGED_DATA.loc[MERGED_DATA['reservation_type']==2]['technology'].plot.hist(alpha=0.5,title="Daily",
normed=True)
plt.xlabel("With or Without Technology")
plt.figure(3)
MERGED_DATA.loc[MERGED_DATA['reservation_type']==3]['technology'].plot.hist(alpha=0.5,title="Weekly",
normed=True)
plt.xlabel("With or Without Technology")
plt.show()
import scipy.stats
KSstatistic, pvalue = scipy.stats.ks_2samp(MERGED_DATA.loc[MERGED_DATA['reservation_type']==3]['technology'],
MERGED_DATA.loc[MERGED_DATA['reservation_type']==2]['technology'])
print "KS probability that Weekly and Daily reservations are drawn from the same underlying population:\n"
print "P(KS) = {}\n".format(pvalue)
KSstatistic, pvalue = scipy.stats.ks_2samp(MERGED_DATA.loc[MERGED_DATA['reservation_type']==1]['technology'],
MERGED_DATA.loc[MERGED_DATA['reservation_type']==2]['technology'])
print "KS probability that Hourly and Daily reservations are drawn from the same underlying population:\n"
print "P(KS) = {}\n".format(pvalue)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then we import the xcs module and run the built-in test() function. By default, the test() function runs the canonical XCS algorithm on the 11-bit (3-bit address) MUX problem for 10,000 steps.
Step2: ```
Step3: The XCSAlgorithm class contains the actual XCS algorithm implementation. The ClassifierSet class is used to represent the algorithm's state, in the form of a set of classifier rules. MUXProblem is the classic multiplexer problem, which defaults to 3 address bits (11 bits total). ScenarioObserver is a wrapper for scenarios which logs the inputs, actions, and rewards as the algorithm attempts to solve the problem.
Step4: Next, we'll create the algorithm which will be used to manage the classifier set and learn the mapping defined by the problem we have selected
Step5: The algorithm's parameters are set to appropriate defaults for most problems, but it is straight forward to modify them if it becomes necessary.
Step6: Here we have selected an exploration probability of .1, which will sacrifice most (9 out of 10) learning opportunities in favor of taking advantage of what has already been learned so far. This makes sense in real-time learning environment; a lower value is more appropriate in cases where the classifier is being trained in advance or is being used simply to learn a minimal rule set. The discount factor is set to 0, since future rewards are not affected at all by the currently selected action. (This is not strictly necessary, since the scenario will inform the algorithm that reward chaining should not be used, but it is useful to highlight this fact.) We have also elected to turn on GA and action set subsumption, which help the system to converge to the minimal effective rule set more quickly in some types of scenarios.
Step7: The algorithm does the work for us, initializing the classifier set as it deems appropriate for the scenario we have provided. It provides the classifier set with the possible actions that can be taken in the given scenario; these are necessary for the classifier set to perform covering operations when the algorithm determines that the classifiers in the population provide insufficient coverage for a particular situation. (Covering is the addition to the population of a randomly generated classifier rule whose condition matches the current situation.)
Step8: We pass the scenario to the classifier set and ask it to run to learn the appropriate input/output mapping. It executes training cycles until the scenario dictates that training should stop. Note that if you wish to see the progress as the algorithm interacts with the scenario, you will need to set the logging level to INFO, as described in the previous section, before calling the run() method.
Step9: ```
Step10: Defining New Scenario Types
Step11: We defined a new class, HaystackProblem, to represent this test case, which inherits from Scenario to ensure that we cannot instantiate the problem until the appropriate methods have been implemented.
Step12: The input_size is saved as a member for later use. Likewise, the value of training_cycles was saved in two places
Step13: The implementations for the property and the methods other than sense() and execute() will be trivial, so let's start with those
Step14: Now we are going to get into the meat of the problem. We want to give the algorithm a random string of bits of size input_size and have it pick out the location of the needle bit through trial and error, by telling us what it thinks the value of the needle bit is. For this to be a useful test, the needle bit needs to be in a fixed location, which we have not yet defined. Let's choose a random bit from among inputs on each run.
Step15: The sense() method is going to create a string of random bits of size input_size and return it. But first it will pick out the value of the needle bit, located at needle_index, and store it in a new member, needle_value, so that execute(action) will know what the correct value for action is.
Step16: Now we need to define the execute(action) method. In order to give the algorithm appropriate feedback to make problem solvable, we should return a high reward when it guesses the correct value for the needle bit, and a low value otherwise. Thus we will return a 1 when the action is the value of the needle bit, and a 0 otherwise. We must also make sure to decrement the remaining cycles to prevent the problem from running indefinitely.
Step17: We have now defined all of the methods that Scenario requires. Let's give it a test run.
Step18: ```
Step19: ```
|
<ASSISTANT_TASK:>
Python Code:
import logging
logging.root.setLevel(logging.INFO)
import xcs
xcs.test()
from xcs import XCSAlgorithm
from xcs.scenarios import MUXProblem, ScenarioObserver
scenario = ScenarioObserver(MUXProblem(50000))
algorithm = XCSAlgorithm()
algorithm.exploration_probability = .1
algorithm.discount_factor = 0
algorithm.do_ga_subsumption = True
algorithm.do_action_set_subsumption = True
model = algorithm.new_model(scenario)
model.run(scenario, learn=True)
print(model)
print(len(model))
for rule in model:
if rule.fitness > .5 and rule.experience >= 10:
print(rule.condition, '=>', rule.action, ' [%.5f]' % rule.fitness)
from xcs.scenarios import Scenario
class HaystackProblem(Scenario):
pass
from xcs.scenarios import Scenario
class HaystackProblem(Scenario):
def __init__(self, training_cycles=1000, input_size=500):
self.input_size = input_size
self.possible_actions = (True, False)
self.initial_training_cycles = training_cycles
self.remaining_cycles = training_cycles
problem = HaystackProblem()
from xcs.scenarios import Scenario
class HaystackProblem(Scenario):
def __init__(self, training_cycles=1000, input_size=500):
self.input_size = input_size
self.possible_actions = (True, False)
self.initial_training_cycles = training_cycles
self.remaining_cycles = training_cycles
@property
def is_dynamic(self):
return False
def get_possible_actions(self):
return self.possible_actions
def reset(self):
self.remaining_cycles = self.initial_training_cycles
def more(self):
return self.remaining_cycles > 0
import random
from xcs.scenarios import Scenario
class HaystackProblem(Scenario):
def __init__(self, training_cycles=1000, input_size=500):
self.input_size = input_size
self.possible_actions = (True, False)
self.initial_training_cycles = training_cycles
self.remaining_cycles = training_cycles
self.needle_index = random.randrange(input_size)
@property
def is_dynamic(self):
return False
def get_possible_actions(self):
return self.possible_actions
def reset(self):
self.remaining_cycles = self.initial_training_cycles
self.needle_index = random.randrange(self.input_size)
def more(self):
return self.remaining_cycles > 0
import random
from xcs.scenarios import Scenario
from xcs.bitstrings import BitString
class HaystackProblem(Scenario):
def __init__(self, training_cycles=1000, input_size=500):
self.input_size = input_size
self.possible_actions = (True, False)
self.initial_training_cycles = training_cycles
self.remaining_cycles = training_cycles
self.needle_index = random.randrange(input_size)
self.needle_value = None
@property
def is_dynamic(self):
return False
def get_possible_actions(self):
return self.possible_actions
def reset(self):
self.remaining_cycles = self.initial_training_cycles
self.needle_index = random.randrange(self.input_size)
def more(self):
return self.remaining_cycles > 0
def sense(self):
haystack = BitString.random(self.input_size)
self.needle_value = haystack[self.needle_index]
return haystack
import random
from xcs.scenarios import Scenario
from xcs.bitstrings import BitString
class HaystackProblem(Scenario):
    """Needle-in-a-haystack scenario for the XCS algorithm.

    Each training cycle presents a random bit string of length `input_size`;
    exactly one fixed (but randomly chosen) position -- the "needle" -- is
    relevant, and the learner is rewarded for reporting that bit's value.
    """
    def __init__(self, training_cycles=1000, input_size=500):
        self.input_size = input_size
        self.possible_actions = (True, False)
        self.initial_training_cycles = training_cycles
        self.remaining_cycles = training_cycles
        self.needle_index = random.randrange(input_size)
        self.needle_value = None
    @property
    def is_dynamic(self):
        """Rewards never depend on earlier actions, so the scenario is static."""
        return False
    def get_possible_actions(self):
        """Return the actions the learner may take: the two bit values."""
        return self.possible_actions
    def reset(self):
        """Restart with a full cycle budget and a freshly chosen needle position."""
        self.remaining_cycles = self.initial_training_cycles
        self.needle_index = random.randrange(self.input_size)
    def more(self):
        """Return whether any training cycles remain."""
        return self.remaining_cycles > 0
    def sense(self):
        """Return a fresh random haystack, recording the needle bit's value so
        that execute() can score the learner's next action against it."""
        haystack = BitString.random(self.input_size)
        self.needle_value = haystack[self.needle_index]
        return haystack
    def execute(self, action):
        """Consume one cycle; reward is True (== 1) for a correct guess of the
        needle bit's value, False (== 0) otherwise."""
        self.remaining_cycles -= 1
        return action == self.needle_value
import logging
import xcs
from xcs.scenarios import ScenarioObserver

# Setup logging so we can see the test run as it progresses.
logging.root.setLevel(logging.INFO)

# Run 1: the scenario at its default size (500-bit haystack, 1000 cycles).
# Create the scenario instance
problem = HaystackProblem()
# Wrap the scenario instance in an observer so progress gets logged,
# and pass it on to the test() function.
xcs.test(scenario=ScenarioObserver(problem))

# Run 2: a smaller haystack with a longer cycle budget.
problem = HaystackProblem(training_cycles=10000, input_size=100)
xcs.test(scenario=ScenarioObserver(problem))

# Run 3: back to the full-size haystack, this time with a hand-tuned algorithm
# instead of the defaults that test() would otherwise construct.
problem = HaystackProblem(training_cycles=10000, input_size=500)
algorithm = xcs.XCSAlgorithm()
# Default parameter settings in test()
algorithm.exploration_probability = .1
# Modified parameter settings
algorithm.ga_threshold = 1
algorithm.crossover_probability = .5
algorithm.do_action_set_subsumption = True
algorithm.do_ga_subsumption = False
algorithm.wildcard_probability = .998
algorithm.deletion_threshold = 1
algorithm.mutation_probability = .002
xcs.test(algorithm, scenario=ScenarioObserver(problem))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: WLS Estimation
Step2: WLS knowing the true variance ratio of heteroscedasticity
Step3: OLS vs. WLS
Step4: Compare the WLS standard errors to heteroscedasticity corrected OLS standard errors
Step5: Calculate OLS prediction interval
Step6: Draw a plot to compare predicted values in WLS and OLS
Step7: Feasible Weighted Least Squares (2-stage FWLS)
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from __future__ import print_function
import numpy as np
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.iolib.table import (SimpleTable, default_txt_fmt)
np.random.seed(1024)
nsample = 50
x = np.linspace(0, 20, nsample)
X = np.column_stack((x, (x - 5)**2))
X = sm.add_constant(X)
beta = [5., 0.5, -0.01]
sig = 0.5
w = np.ones(nsample)
w[nsample * 6//10:] = 3
y_true = np.dot(X, beta)
e = np.random.normal(size=nsample)
y = y_true + sig * w * e
X = X[:,[0,1]]
mod_wls = sm.WLS(y, X, weights=1./(w ** 2))
res_wls = mod_wls.fit()
print(res_wls.summary())
res_ols = sm.OLS(y, X).fit()
print(res_ols.params)
print(res_wls.params)
se = np.vstack([[res_wls.bse], [res_ols.bse], [res_ols.HC0_se],
[res_ols.HC1_se], [res_ols.HC2_se], [res_ols.HC3_se]])
se = np.round(se,4)
colnames = ['x1', 'const']
rownames = ['WLS', 'OLS', 'OLS_HC0', 'OLS_HC1', 'OLS_HC3', 'OLS_HC3']
tabl = SimpleTable(se, colnames, rownames, txt_fmt=default_txt_fmt)
print(tabl)
covb = res_ols.cov_params()
prediction_var = res_ols.mse_resid + (X * np.dot(covb,X.T).T).sum(1)
prediction_std = np.sqrt(prediction_var)
tppf = stats.t.ppf(0.975, res_ols.df_resid)
prstd_ols, iv_l_ols, iv_u_ols = wls_prediction_std(res_ols)
prstd, iv_l, iv_u = wls_prediction_std(res_wls)
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(x, y, 'o', label="Data")
ax.plot(x, y_true, 'b-', label="True")
# OLS
ax.plot(x, res_ols.fittedvalues, 'r--')
ax.plot(x, iv_u_ols, 'r--', label="OLS")
ax.plot(x, iv_l_ols, 'r--')
# WLS
ax.plot(x, res_wls.fittedvalues, 'g--.')
ax.plot(x, iv_u, 'g--', label="WLS")
ax.plot(x, iv_l, 'g--')
ax.legend(loc="best");
resid1 = res_ols.resid[w==1.]
var1 = resid1.var(ddof=int(res_ols.df_model)+1)
resid2 = res_ols.resid[w!=1.]
var2 = resid2.var(ddof=int(res_ols.df_model)+1)
w_est = w.copy()
w_est[w!=1.] = np.sqrt(var2) / np.sqrt(var1)
res_fwls = sm.WLS(y, X, 1./((w_est ** 2))).fit()
print(res_fwls.summary())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nasa-giss', 'sandbox-3', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: tensors, ranks, shapes and types
Step2: session
Step3: variables
Step4: single variable linear regression
Step5: placeholders and variables
Step6: cost minimization
Step7: multivariable linear regression
Step8: hypothesis defined using a matrix
Step9: qualifying accuracy, ROC curves
Step10: reading data
Step11: preprocessing
Step12: logistic regression
Step13: example
Step14: one-hot encoding
Step15: softmax
Step16: training and test datasets
Step17: training epoch/batch
Step18: So, the form of the data is a simple NumPy array that can be represented as an image
Step19: An array can be represented as an image
Step20: An array can be represented as an image using padding as necessary
Step21: example
Step22: example
Step23: deep learning
Step24: simple neural network versus deep neural network
Step25: example
Step26: rectifier activation functions
Step27: Xavier initialization
Step28: example
Step29: example
Step30: convolutional neural network
Step33: example
Step34: MNIST comparison using identical learning rate, training epochs and batch size
Step35: The object returned is a scipy.optimize.OptimizeResult, which is essentially a dictionary subclass.
Step37: Hyperparameter optimization of a machine learning algorithm is often an exhaustive exploration of a subset of the space of all hyperparameter configurations (for example, using a grid search like sklearn.model_selection.GridSearchCV), which can be time-consuming. Scikit-Optimize gp_minimize can be used to tune hyperparameters using sequential model-based optimization.
|
<ASSISTANT_TASK:>
Python Code:
# Silence TensorFlow's C++ backend logging below ERROR level. This must be done
# through the TF_CPP_MIN_LOG_LEVEL *environment variable* before the native
# runtime starts; the original `tf.TF_CPP_MIN_LOG_LEVEL = 3` merely created an
# unused attribute on the module and had no effect on logging.
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

import tensorflow as tf

# Create a constant operation. This operation is added as a node to the default graph.
hello = tf.constant("hello world")
# Start a TensorFlow session.
sess = tf.Session()
# Run the operation and get the result (a bytes object under Python 3).
print(sess.run(hello))
# Constant nodes: their values are baked into the graph at construction time.
node1 = tf.constant(3.0, tf.float32)
node2 = tf.constant(4.0) # (also tf.float32 by default)
# An op node that consumes the two constants.
node3 = tf.add(node1, node2)
# Printing a tensor shows its graph metadata (name, shape, dtype), not a value...
print("node1: {node}".format(node = node1))
print("node2: {node}".format(node = node2))
print("node3: {node}".format(node = node3))
# ...values are only produced by running the graph inside a session.
sess = tf.Session()
print("sess.run(node1, node2): {result}".format(
    result = sess.run([node1, node2])
))
print("sess.run(node3): {result}".format(
    result = sess.run(node3)
))
# Placeholders: graph inputs whose values are supplied at run time via feed_dict.
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
# Create a node that is a shortcut for tf.add(a, b).
adder_node = a + b
# Feed scalars...
result = sess.run(
    adder_node,
    feed_dict = {
        a: 3,
        b: 4.5
    }
)
print(result)
# ...or vectors; the same graph handles both (element-wise addition).
result = sess.run(
    adder_node,
    feed_dict = {
        a: [1,3],
        b: [2, 4]
    }
)
print(result)
# Graphs compose: build a new node on top of adder_node.
add_and_triple = adder_node * 3.
result = sess.run(
    add_and_triple,
    feed_dict = {
        a: 3,
        b: 4.5
    }
)
print(result)
# Constants need no feed_dict; sess.run accepts a single fetch or a list.
sess = tf.Session()
a = tf.constant([10 , 20])
b = tf.constant([1.0, 2.0])
print(sess.run(a))
print(sess.run([a, b]))
# Create two variables.
# Variables hold mutable state (e.g. model weights) that persists across
# sess.run calls; here a 784x200 weight matrix and a 200-element bias vector.
weights = tf.Variable(
    tf.random_normal(
        [784, 200],
        stddev = 0.35
    ),
    name = "weights"
)
biases = tf.Variable(
    tf.zeros([200]),
    name = "biases"
)
# Create an operation to initialize the variables.
init_op = tf.global_variables_initializer()
# more code
# Variables must be initialized by running init_op before they are first read.
with tf.Session() as sess:
    sess.run(init_op)
import tensorflow as tf
# Fix the graph-level seed so the random weight initialisation is reproducible.
tf.set_random_seed(777)
# Create some data.
x_train = [1, 2, 3]
y_train = [1, 2, 3]
# Build the graph using TensorFlow operations. With the hypothesis H(x) = Wx + b, the goal is to try to find values for W and b in order to calculate y_data = x_data * W + b. Analytically, W should be 1 and b should be 0.
W = tf.Variable(tf.random_normal([1]), name = "weight")
b = tf.Variable(tf.random_normal([1]), name = "bias")
# Define the hypothesis.
hypothesis = x_train * W + b
# Define the cost function (mean squared error).
cost = tf.reduce_mean(tf.square(hypothesis - y_train))
# Define a method of minimisation, in this case gradient descent. In gradient descent, steps proportional to the negative of the function gradient at the current point are taken. It is the method of steepest descent to find the local minimum of a function.
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.01)
train = optimizer.minimize(cost)
# Launch the graph in a session.
sess = tf.Session()
# Initialize global variables in the graph.
sess.run(tf.global_variables_initializer())
# Fit.
for step in range(2001):
    sess.run(train)
    # Log progress every 500 steps; cost should shrink towards 0.
    if step % 500 == 0:
        print("step: {step}, cost: {cost}, W: {W}, b: {b}".format(
            step = step,
            cost = sess.run(cost),
            W = sess.run(W),
            b = sess.run(b)
        ))
import tensorflow as tf

# Model parameters, initialised randomly; gradient descent will tune them.
W = tf.Variable(tf.random_normal([1]), name = "weight")
b = tf.Variable(tf.random_normal([1]), name = "bias")

# Create placeholders for tensors for x and y data. Placeholders let the same
# graph be trained or evaluated on any data supplied through feed_dict.
X = tf.placeholder(tf.float32, shape = [None])
Y = tf.placeholder(tf.float32, shape = [None])

# The hypothesis and cost must be built from the placeholders X and Y.
# (The original used the Python lists x_train/y_train here, which silently
# ignored every value passed via feed_dict -- including the test inputs below.)
hypothesis = X * W + b
cost = tf.reduce_mean(tf.square(hypothesis - Y))

optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.01)
train = optimizer.minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Fit: fetch cost/W/b alongside the train op so each step needs one run call.
for step in range(2001):
    cost_value, W_value, b_value, _ = sess.run(
        [cost, W, b, train],
        feed_dict = {
            X: [1, 2, 3],
            Y: [1, 2, 3]
        }
    )
    if step % 500 == 0:
        print("step: {step}, cost: {cost}, W: {W}, b: {b}".format(
            step = step,
            cost = cost_value,
            W = W_value,
            b = b_value
        ))

# Test the trained model on unseen inputs; predictions should approach y = x.
print(sess.run(hypothesis, feed_dict={X: [5]}))
print(sess.run(hypothesis, feed_dict={X: [2.5]}))
print(sess.run(hypothesis, feed_dict={X: [1.5, 3.5]}))
import tensorflow as tf
import matplotlib.pyplot as plt

# Visualise the cost surface of the simplified hypothesis H(x) = Wx
# (no bias term) by sweeping W over a range of values.
X = [1, 2, 3]
Y = [1, 2, 3]
W = tf.placeholder(tf.float32)
hypothesis = X * W
cost = tf.reduce_mean(tf.square(hypothesis - Y))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Variables for plotting cost function
W_value = []
cost_value = []
# Evaluate the cost at W = -3.0, -2.9, ..., 4.9.
for i in range(-30, 50):
    feed_W = i * 0.1
    cost_current, W_current = sess.run(
        [cost, W],
        feed_dict = {W: feed_W}
    )
    W_value.append(W_current)
    cost_value.append(cost_current)
# The resulting curve is a parabola with its minimum at W = 1.
plt.xlabel("W"); plt.ylabel("cost(W)")
plt.plot(W_value, cost_value)
plt.axes().set_aspect(1 / plt.axes().get_data_ratio())
plt.show()
import tensorflow as tf
# Multi-variable linear regression with one placeholder per feature:
# predict a score y from three input scores x1, x2, x3.
x1_data = [ 73., 93., 89., 96., 73.]
x2_data = [ 80., 88., 91., 98., 66.]
x3_data = [ 75., 93., 90., 100., 70.]
y_data = [152., 185., 180., 196., 142.]
x1 = tf.placeholder(tf.float32)
x2 = tf.placeholder(tf.float32)
x3 = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
w1 = tf.Variable(tf.random_normal([1]), name = "weight1")
w2 = tf.Variable(tf.random_normal([1]), name = "weight2")
w3 = tf.Variable(tf.random_normal([1]), name = "weight3")
b = tf.Variable(tf.random_normal([1]), name = "bias" )
# Hypothesis: weighted sum of the three features plus a bias.
hypothesis = x1 * w1 + x2 * w2 + x3 * w3 + b
cost = tf.reduce_mean(tf.square(hypothesis - Y))
# Small learning rate because the feature values are not normalised.
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 1e-5)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(2001):
    cost_value, hy_value, _ = sess.run(
        [cost, hypothesis, train],
        feed_dict = {
            x1: x1_data,
            x2: x2_data,
            x3: x3_data,
            Y: y_data
        }
    )
    if step % 500 == 0:
        print("\nstep: {step}, cost: {cost},\nprediction: {prediction}".format(
            step = step,
            cost = cost_value,
            prediction = hy_value
        ))
import tensorflow as tf
# The same multi-variable regression expressed in matrix form: X is a
# [samples, 3] matrix, W a [3, 1] weight vector and the hypothesis is XW + b.
x_data = [
    [ 73., 80., 75.],
    [ 93., 88., 93.],
    [ 89., 91., 90.],
    [ 96., 98., 100.],
    [ 73., 66., 70.]
]
y_data = [
    [152.],
    [185.],
    [180.],
    [196.],
    [142.]
]
X = tf.placeholder(tf.float32, shape=[None, 3])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([3, 1]), name = "weight")
b = tf.Variable(tf.random_normal([1]), name = "bias" )
hypothesis = tf.matmul(X, W) + b
cost = tf.reduce_mean(tf.square(hypothesis - Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 1e-5)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(2001):
    cost_value, hy_value, _ = sess.run(
        [cost, hypothesis, train], feed_dict={X: x_data, Y: y_data})
    if step % 500 == 0:
        print("\nstep: {step}, cost: {cost},\nprediction:\n{prediction}".format(
            step = step,
            cost = cost_value,
            prediction = hy_value
        ))
import matplotlib.pyplot as plt
import numpy as np
import sklearn.metrics

# Plot a receiver operating characteristic (ROC) curve for ground-truth
# labels and predicted probabilities, annotated with the area under the
# curve (AUC).
y_true = np.array([ 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0])
y_probabilities = np.array([0.9, 0.8, 0.7, 0.6, 0.55, 0.54, 0.53, 0.52, 0.51, 0.505, 0.4, 0.39, 0.38, 0.37, 0.36, 0.35, 0.34, 0.33, 0.30, 0.1])
# get false-positive rate, true-positive rate and thresholds
FPR, TPR, thresholds = sklearn.metrics.roc_curve(y_true, y_probabilities)
# BUG FIX: sklearn.metrics.auc integrates a curve, so it takes the curve's x
# and y coordinates (FPR, TPR) -- not labels and scores as originally passed.
ROC_AUC = sklearn.metrics.auc(FPR, TPR)
plt.plot(FPR, TPR, label = "ROC curve (area = {area})".format(area = ROC_AUC))
plt.plot([0, 1], [0, 1], "k--") # random predictions curve
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel("false positive rate")
# BUG FIX: closing parenthesis was missing here (SyntaxError).
plt.ylabel("true positive rate")
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
# NOTE(review): removed stray lines pasted in from the cost-curve example
# above (they overwrote the axis labels and replotted unrelated data).
plt.show()
import numpy as np
import tensorflow as tf
# Multi-variable linear regression with training data loaded from a CSV
# file; the last column is the target, all other columns are features.
xy = np.loadtxt(
    "data.csv",
    delimiter = ",",
    dtype = np.float32
)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]
X = tf.placeholder(tf.float32, shape=[None, 3])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([3, 1]), name = "weight")
b = tf.Variable(tf.random_normal([1]), name = "bias" )
hypothesis = tf.matmul(X, W) + b
cost = tf.reduce_mean(tf.square(hypothesis - Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(2001):
    cost_value, hy_value, _ = sess.run(
        [cost, hypothesis, train],
        feed_dict = {X: x_data, Y: y_data}
    )
    if step % 500 == 0:
        print("\nstep: {step}, cost: {cost},\nprediction:\n{prediction}".format(
            step = step,
            cost = cost_value,
            prediction = hy_value
        ))
# Predict scores for unseen inputs using the trained model.
print("\npredictions")
test_x_data = [
    [100, 70, 101]
]
result = sess.run(hypothesis, feed_dict = {X: test_x_data})
print("\ninput data: {data},\nscore prediction:\n{prediction}".format(
    data = test_x_data,
    prediction = result
))
test_x_data = [
    [60, 70, 110],
    [90, 100, 80]
]
result = sess.run(hypothesis, feed_dict = {X: test_x_data})
print("\ninput data: {data},\nscore prediction:\n{prediction}".format(
    data = test_x_data,
    prediction = result
))
import tensorflow as tf
# Stream training data from a CSV file with TensorFlow's queue-based input
# pipeline (filename queue + line reader + CSV decoder + batcher) instead of
# loading the whole file into memory at once.
filename_queue = tf.train.string_input_producer(
    ["data.csv"],
    shuffle = False,
    name = "filename_queue")
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
# Set default values for empty columns and specify the decoded result type.
xy = tf.decode_csv(
    value,
    record_defaults = [[0.], [0.], [0.], [0.]]
)
# Collect batches of CSV.
train_x_batch, train_y_batch =\
    tf.train.batch(
        [xy[0:-1], xy[-1:]],
        batch_size = 10
    )
X = tf.placeholder(tf.float32, shape = [None, 3])
Y = tf.placeholder(tf.float32, shape = [None, 1])
W = tf.Variable(tf.random_normal([3, 1]), name = "weight")
b = tf.Variable(tf.random_normal([1]), name = "bias")
hypothesis = tf.matmul(X, W) + b
cost = tf.reduce_mean(tf.square(hypothesis - Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Start populating the filename queue.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(
    sess = sess,
    coord = coord
)
for step in range(2001):
    # Pull the next minibatch off the queue, then train on it.
    x_batch, y_batch = sess.run([train_x_batch, train_y_batch])
    cost_value, hy_value, _ = sess.run(
        [cost, hypothesis, train],
        feed_dict = {X: x_batch, Y: y_batch}
    )
    if step % 500 == 0:
        print("\nstep: {step}, cost: {cost},\nprediction:\n{prediction}".format(
            step = step,
            cost = cost_value,
            prediction = hy_value
        ))
# Shut the input pipeline threads down cleanly.
coord.request_stop()
coord.join(threads)
print("\npredictions")
test_x_data = [
    [100, 70, 101]
]
result = sess.run(hypothesis, feed_dict = {X: test_x_data})
print("\ninput data: {data},\nscore prediction:\n{prediction}".format(
    data = test_x_data,
    prediction = result
))
test_x_data = [
    [60, 70, 110],
    [90, 100, 80]
]
result = sess.run(hypothesis, feed_dict = {X: test_x_data})
print("\ninput data: {data},\nscore prediction:\n{prediction}".format(
    data = test_x_data,
    prediction = result
))
import tensorflow as tf
# Binary classification with logistic regression: sigmoid hypothesis,
# cross-entropy cost, and accuracy measured with a 0.5 decision threshold.
x_data = [
    [1, 2],
    [2, 3],
    [3, 1],
    [4, 3],
    [5, 3],
    [6, 2]
]
y_data = [
    [0],
    [0],
    [0],
    [1],
    [1],
    [1]
]
X = tf.placeholder(tf.float32, shape = [None, 2])
Y = tf.placeholder(tf.float32, shape = [None, 1])
W = tf.Variable(tf.random_normal([2, 1]), name = "weight")
b = tf.Variable(tf.random_normal([1 ]), name = "bias" )
# hypothesis using sigmoid: tf.div(1., 1. + tf.exp(tf.matmul(X, W)))
hypothesis = tf.sigmoid(tf.matmul(X, W) + b)
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
train = tf.train.GradientDescentOptimizer(learning_rate = 0.01).minimize(cost)
# accuracy computation: true if hypothesis > 0.5 else false
predicted = tf.cast(hypothesis > 0.5, dtype = tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype = tf.float32))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(10001):
    cost_value, _ = sess.run(
        [cost, train],
        feed_dict = {X: x_data, Y: y_data}
    )
    if step % 1000 == 0:
        print("\nstep: {step}, cost: {cost}".format(
            step = step,
            cost = cost_value
        ))
# Report predictions and accuracy on the training data.
print("\naccuracy report:")
h, c, a = sess.run(
    [hypothesis, predicted, accuracy],
    feed_dict = {X: x_data, Y: y_data}
)
print("\nhypothesis:\n\n{hypothesis}\n\ncorrect (Y):\n\n{correct}\n\naccuracy: {accuracy}".format(
    hypothesis = h,
    correct = c,
    accuracy = a
))
import numpy as np
import tensorflow as tf
# Logistic regression on a preprocessed CSV dataset: the last column is the
# binary class label, all other columns are features (header row skipped).
xy = np.loadtxt(
    "output_preprocessed.csv",
    skiprows = 1,
    delimiter = ",",
    dtype = np.float32
)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]
number_of_features = x_data.shape[1]
print("features data shape: " + str(x_data.shape))
print("class data shape: " + str(y_data.shape))
print("number of features: " + str(number_of_features))
X = tf.placeholder(tf.float32, shape=[None, number_of_features])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([number_of_features, 1]), name = "weight")
b = tf.Variable(tf.random_normal([1] ), name = "bias" )
hypothesis = tf.sigmoid(tf.matmul(X, W) + b)
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
train = tf.train.GradientDescentOptimizer(learning_rate = 0.01).minimize(cost)
# accuracy computation: true if hypothesis > 0.5 else false
predicted = tf.cast(hypothesis > 0.5, dtype = tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype = tf.float32))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print("")
for step in range(5001):
    cost_value, _ = sess.run(
        [cost, train],
        feed_dict = {X: x_data, Y: y_data})
    if step % 1000 == 0:
        print("step: {step}, cost: {cost}".format(
            step = step,
            cost = cost_value
        ))
# NOTE(review): evaluation reuses the training data, so this measures fit,
# not generalisation.
print("\naccuracy report (testing trained system on training data):")
h, c, a = sess.run(
    [hypothesis, predicted, accuracy],
    feed_dict = {X: x_data, Y: y_data}
)
print("\nhypothesis:\n\n{hypothesis}\n\ncorrect (Y):\n\n{correct}\n\naccuracy: {accuracy}".format(
    hypothesis = h,
    correct = c,
    accuracy = a
))
import pandas as pd

# Demonstrate one-hot encoding of a categorical column with pandas.
data = pd.DataFrame({
    "A": ["a", "b", "a"],
    "B": ["b", "a", "c"]
})
print("\nraw data:\n")
print(data)
# Build indicator columns for column B, then drop the original column and
# merge the indicators back into the frame.
encoded_B = pd.get_dummies(data["B"])
data = data.drop("B", axis = 1).join(encoded_B)
print("\ndata with column B encoded:\n")
print(data)
import tensorflow as tf
# Multinomial (softmax) classification: 4 features, 3 classes with one-hot
# labels; after training, predictions are checked on a few sample inputs.
x_data = [
    [1, 2, 1, 1],
    [2, 1, 3, 2],
    [3, 1, 3, 4],
    [4, 1, 5, 5],
    [1, 7, 5, 5],
    [1, 2, 5, 6],
    [1, 6, 6, 6],
    [1, 7, 7, 7]
]
y_data = [
    [0, 0, 1],
    [0, 0, 1],
    [0, 0, 1],
    [0, 1, 0],
    [0, 1, 0],
    [0, 1, 0],
    [1, 0, 0],
    [1, 0, 0]
]
X = tf.placeholder("float", [None, 4])
Y = tf.placeholder("float", [None, 3])
number_of_classes = 3
W = tf.Variable(tf.random_normal([4, number_of_classes]), name = "weight")
b = tf.Variable(tf.random_normal([number_of_classes] ), name = "bias" )
# tf.nn.softmax computes softmax activations
# softmax = exp(logits) / reduce_sum(exp(logits), dim)
hypothesis = tf.nn.softmax(tf.matmul(X, W) + b)
# Cross-entropy cost for one-hot labels.
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis = 1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.1).minimize(cost)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(2001):
        sess.run(
            optimizer,
            feed_dict = {X: x_data, Y: y_data}
        )
        if step % 200 == 0:
            print("step: {step}, cost: {cost}".format(
                step = step,
                cost = sess.run(cost, feed_dict = {X: x_data, Y: y_data})
            ))
    # Predict classes for sample inputs; argmax picks the most probable class.
    print("\ntesting\n")
    print("------------------------------")
    result = sess.run(
        hypothesis,
        feed_dict = {
            X: [[1, 11, 7, 9]]
        }
    )
    print("result:\n\n{result}\n\nargmax: {argmax}".format(
        result = result,
        argmax = sess.run(tf.argmax(result, 1))
    ))
    print("------------------------------")
    result = sess.run(
        hypothesis,
        feed_dict = {
            X: [[1, 3, 4, 3]]
        }
    )
    print("result:\n\n{result}\n\nargmax: {argmax}".format(
        result = result,
        argmax = sess.run(tf.argmax(result, 1))
    ))
    print("------------------------------")
    result = sess.run(
        hypothesis,
        feed_dict = {
            X: [[1, 1, 0, 1]]
        }
    )
    print("result:\n\n{result}\n\nargmax: {argmax}".format(
        result = result,
        argmax = sess.run(tf.argmax(result, 1))
    ))
    print("------------------------------")
    # Batch prediction: all three samples at once.
    result = sess.run(
        hypothesis,
        feed_dict = {
            X: [
                [1, 11, 7, 9],
                [1, 3, 4, 3],
                [1, 1, 0, 1]
            ]
        }
    )
    print("result:\n\n{result}\n\nargmax: {argmax}".format(
        result = result,
        argmax = sess.run(tf.argmax(result, 1))
    ))
import numpy as np
import tensorflow as tf
# Softmax classification with a held-out test set, used to experiment with
# the effect of the learning rate on training.
# training dataset
x_data = [
    [1, 2, 1],
    [1, 3, 2],
    [1, 3, 4],
    [1, 5, 5],
    [1, 7, 5],
    [1, 2, 5],
    [1, 6, 6],
    [1, 7, 7]
]
y_data = [
    [0, 0, 1],
    [0, 0, 1],
    [0, 0, 1],
    [0, 1, 0],
    [0, 1, 0],
    [0, 1, 0],
    [1, 0, 0],
    [1, 0, 0]
]
# test dataset
x_test = [
    [2, 1, 1],
    [3, 1, 2],
    [3, 3, 4]
]
y_test = [
    [0, 0, 1],
    [0, 0, 1],
    [0, 0, 1]
]
X = tf.placeholder("float", [None, 3])
Y = tf.placeholder("float", [None, 3])
W = tf.Variable(tf.random_normal([3, 3]))
b = tf.Variable(tf.random_normal([3] ))
hypothesis = tf.nn.softmax(tf.matmul(X, W) + b)
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis = 1))
# Alternative learning rates for experimentation: 1.5 diverges (too large),
# 1e-10 barely moves (too small).
#learning_rate = 1.5
#learning_rate = 1e-10
learning_rate = 0.1
optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost)
prediction = tf.argmax(hypothesis, 1)
is_correct = tf.equal(prediction, tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("\ntraining")
    for step in range(201):
        cost_value, W_value, _ = sess.run(
            [cost, W, optimizer],
            feed_dict = {X: x_data, Y: y_data}
        )
        if step % 50 == 0:
            print("\nstep: {step},\ncost: {cost},\nW:\n{W}".format(
                step = step,
                cost = cost_value,
                W = W_value
            ))
    # Evaluate on the held-out test set.
    print("\ntesting\n")
    result = sess.run(prediction, feed_dict = {X: x_test} )
    accuracy = sess.run(accuracy, feed_dict = {X: x_test, Y: y_test})
    print("predictions:\n\n{result}\n\naccuracy:\n\n{accuracy}".format(
        result = result,
        accuracy = accuracy
    ))
import matplotlib.pyplot as plt
import random
import tensorflow as tf
import tensorflow.examples.tutorials.mnist
import numpy as np
# Display one MNIST test image and extract its one-hot class label, first
# with NumPy and then with TensorFlow's argmax.
mnist = tensorflow.examples.tutorials.mnist.input_data.read_data_sets(
    "MNIST_data/",
    one_hot = True
)
# access some image (of some index number)
# access the (one-hot) class label of the image
index = 15
image = mnist.test.images[index].reshape(28, 28)
label = mnist.test.labels[index:index + 1]
plt.imshow(
    image,
    cmap = "Greys",
    interpolation = "nearest"
)
plt.show()
# The label is one-hot, so the index of the 1 is the class digit.
print("label (extracted by NumPy): {label}".format(
    label = np.where(label[0] == 1)[0]))
sess = tf.InteractiveSession()
print("label (extracted by TensorFlow): {label}".format(
    label = sess.run(tf.argmax(label, 1))))
import matplotlib.pyplot as plt
import numpy as np
image_array =\
np.array(
[[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0.20000002, 0.51764709, 0.83921576,
0.99215692, 0.99607849, 0.99215692, 0.7960785 , 0.63529414,
0.16078432, 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.40000004, 0.55686277,
0.7960785 , 0.7960785 , 0.99215692, 0.98823535, 0.99215692,
0.98823535, 0.59215689, 0.27450982, 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.99607849, 0.99215692,
0.95686281, 0.7960785 , 0.55686277, 0.40000004, 0.32156864,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.67450982, 0.98823535,
0.7960785 , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.08235294, 0.87450987,
0.91764712, 0.11764707, 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0.4784314 ,
0.99215692, 0.19607845, 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0.48235297,
0.99607849, 0.35686275, 0.20000002, 0.20000002, 0.20000002,
0.03921569, 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.08235294, 0.87450987,
0.99215692, 0.98823535, 0.99215692, 0.98823535, 0.99215692,
0.67450982, 0.32156864, 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0.08235294, 0.83921576, 0.99215692,
0.7960785 , 0.63529414, 0.40000004, 0.40000004, 0.7960785 ,
0.87450987, 0.99607849, 0.99215692, 0.20000002, 0.03921569,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0.2392157 , 0.99215692, 0.67058825,
0. , 0. , 0. , 0. , 0. ,
0.07843138, 0.43921572, 0.75294125, 0.99215692, 0.83137262,
0.16078432, 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.40000004, 0.7960785 ,
0.91764712, 0.20000002, 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0.07843138,
0.83529419, 0.90980399, 0.32156864, 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0.24313727, 0.7960785 , 0.91764712, 0.43921572, 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0.07843138, 0.83529419, 0.98823535, 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0.60000002, 0.99215692, 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0.16078432, 0.91372555, 0.83137262, 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.44313729, 0.36078432,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0.12156864, 0.67843139, 0.95686281, 0.15686275, 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0.32156864, 0.99215692, 0.59215689,
0. , 0. , 0. , 0. , 0. ,
0. , 0.08235294, 0.40000004, 0.40000004, 0.71764708,
0.91372555, 0.83137262, 0.31764707, 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0.32156864, 1. , 0.99215692,
0.91764712, 0.59607846, 0.60000002, 0.75686282, 0.67843139,
0.99215692, 0.99607849, 0.99215692, 0.99607849, 0.83529419,
0.55686277, 0.07843138, 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.27843139, 0.59215689,
0.59215689, 0.90980399, 0.99215692, 0.83137262, 0.75294125,
0.59215689, 0.51372552, 0.19607845, 0.19607845, 0.03921569,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ]], dtype = np.float32)
# Reshape the flat 784-value array above into a 28x28 image and display it.
image = image_array.reshape(28, 28)
plt.imshow(
    image,
    cmap = "Greys",
    interpolation = "nearest"
)
plt.show()
import matplotlib.pyplot as plt
import numpy as np

# A tiny 9-pixel grayscale example: intensities in [0, 1], arranged as a
# 3x3 image and rendered with matplotlib.
pixel_values = np.array(
    [0.5, 0.0, 0.0,
     0.0, 1.0, 1.0,
     0.5, 0.0, 0.0]
)
image = pixel_values.reshape(3, 3)
print(image)
plt.imshow(image, cmap = "Greys", interpolation = "nearest")
plt.show()
import math
import matplotlib.pyplot as plt
import numpy as np
def numpy_array_pad_square_shape(
    array     = None,
    pad_value = 0
    ):
    """
    Pad a one-dimensional array with pad_value so that it fills the smallest
    square that can hold it, and return that square as a 2D array.

    :param array: one-dimensional NumPy array (or sequence) to pad
    :param pad_value: value used for the padding elements (default 0)
    :return: two-dimensional square NumPy array
    :raises ValueError: if no array is specified
    """
    # BUG FIX: the original default (None) crashed later with an opaque
    # TypeError from len(None); fail fast with a clear message instead.
    if array is None:
        raise ValueError("no array specified")
    array = np.asarray(array)
    # Smallest integer width such that width ** 2 >= array.size.
    width_padded = int(math.ceil(math.sqrt(array.size)))
    # Pad with pad_value in the input's dtype so the dtype is preserved.
    padding = np.full(width_padded ** 2 - array.size, pad_value, dtype = array.dtype)
    return np.append(array, padding).reshape(width_padded, width_padded)
# Pad an 8-element array to the nearest square (3x3) and display it.
data = np.array([0.5, 0.0, 0.0, 0.0, 1.0, 1.0, 0.5, 0.0])
image = numpy_array_pad_square_shape(array = data)
plt.imshow(
    image,
    cmap = "Greys",
    interpolation = "nearest"
)
plt.show()
import matplotlib.pyplot as plt
import random
import tensorflow as tf
import tensorflow.examples.tutorials.mnist

# Softmax regression on MNIST: train with minibatch gradient descent, report
# test-set accuracy, then visualise one random prediction.
mnist = tensorflow.examples.tutorials.mnist.input_data.read_data_sets(
    "MNIST_data/",
    one_hot = True
)
number_of_classes = 10
# MNIST data image of shape 28 * 28 = 784
X = tf.placeholder(tf.float32, [None, 784])
# 10 classes (digits 0 to 9)
Y = tf.placeholder(tf.float32, [None, number_of_classes])
W = tf.Variable(tf.random_normal([784, number_of_classes]))
b = tf.Variable(tf.random_normal([number_of_classes]))
# hypothesis (using softmax)
hypothesis = tf.nn.softmax(tf.matmul(X, W) + b)
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis = 1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.1).minimize(cost)
# FIX: use tf.argmax consistently; tf.arg_max is a deprecated alias.
is_correct = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
# parameters
training_epochs = 15
batch_size = 100
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("\ntraining\n")
    for epoch in range(training_epochs):
        cost_mean = 0
        total_batch = int(mnist.train.num_examples / batch_size)
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            c, _ = sess.run(
                [cost, optimizer],
                feed_dict={X: batch_xs, Y: batch_ys}
            )
            # Accumulate the running mean of the minibatch costs.
            cost_mean += c / total_batch
        print("epoch: {epoch}\tcost: {cost}".format(
            epoch = epoch + 1,
            cost = cost_mean
        ))
    print("\ntesting")
    # FIX: store the evaluated value under a new name instead of clobbering
    # the accuracy tensor with a Python float.
    accuracy_value = accuracy.eval(
        session = sess,
        feed_dict = {
            X: mnist.test.images,
            Y: mnist.test.labels
        }
    )
    print("\naccuracy:\n\n{accuracy}".format(
        accuracy = accuracy_value
    ))
    # select one test example and predict
    r = random.randint(0, mnist.test.num_examples - 1)
    print("\nlabel:")
    print(sess.run(
        tf.argmax(mnist.test.labels[r:r + 1], 1)
        )
    )
    print("\nprediction:")
    print(sess.run(
        tf.argmax(hypothesis, 1),
        feed_dict = {X: mnist.test.images[r:r + 1]}
        )
    )
    plt.imshow(
        mnist.test.images[r:r + 1].reshape(28, 28),
        cmap = "Greys",
        interpolation = "nearest"
    )
    plt.show()
import numpy as np
import tensorflow as tf
# Learn the XOR function with a two-layer network (one hidden layer of two
# sigmoid units); a single-layer model cannot represent XOR.
x_data = np.array([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
    ],
    dtype=np.float32
)
y_data = np.array([
    [0],
    [1],
    [1],
    [0]
    ],
    dtype=np.float32
)
X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])
# Hidden layer: 2 inputs -> 2 sigmoid units.
W1 = tf.Variable(tf.random_normal([2, 2]), name = "weight1")
b1 = tf.Variable(tf.random_normal([2]), name = "bias1")
layer1 = tf.sigmoid(tf.matmul(X, W1) + b1)
# Output layer: 2 hidden units -> 1 sigmoid output.
W2 = tf.Variable(tf.random_normal([2, 1]), name = "weight2")
b2 = tf.Variable(tf.random_normal([1]), name = "bias2")
hypothesis = tf.sigmoid(tf.matmul(layer1, W2) + b2)
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
train = tf.train.GradientDescentOptimizer(learning_rate = 0.1).minimize(cost)
# accuracy computation: true if hypothesis > 0.5 else false
predicted = tf.cast(hypothesis > 0.5, dtype = tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype = tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10001):
        sess.run(
            train,
            feed_dict = {
                X: x_data,
                Y: y_data
            }
        )
        if step % 2000 == 0:
            print("\nstep: {step}\ncost: {cost}\nW:\n{W}".format(
                step = step,
                cost = sess.run(
                    cost,
                    feed_dict = {
                        X: x_data,
                        Y: y_data
                    }
                ),
                W = sess.run([W1, W2])
            ))
    print("\naccuracy report:")
    h, c, a = sess.run(
        [hypothesis, predicted, accuracy],
        feed_dict = {
            X: x_data,
            Y: y_data
        }
    )
    print("\nhypothesis:\n\n{hypothesis}\n\ncorrect (Y):\n\n{correct}\n\naccuracy: {accuracy}".format(
        hypothesis = h,
        correct = c,
        accuracy = a
    ))
import numpy as np
import tensorflow as tf
# The XOR problem again, but with a deeper network: three hidden layers of
# 10 sigmoid units each before the single-output layer.
x_data = np.array([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
    ],
    dtype = np.float32
)
y_data = np.array([
    [0],
    [1],
    [1],
    [0]
    ],
    dtype = np.float32
)
X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])
W1 = tf.Variable(tf.random_normal([2, 10]), name = "weight1")
b1 = tf.Variable(tf.random_normal([10]), name = "bias1")
layer1 = tf.sigmoid(tf.matmul(X, W1) + b1)
W2 = tf.Variable(tf.random_normal([10, 10]), name = "weight2")
b2 = tf.Variable(tf.random_normal([10]), name = "bias2")
layer2 = tf.sigmoid(tf.matmul(layer1, W2) + b2)
W3 = tf.Variable(tf.random_normal([10, 10]), name = "weight3")
b3 = tf.Variable(tf.random_normal([10]), name = "bias3")
layer3 = tf.sigmoid(tf.matmul(layer2, W3) + b3)
W4 = tf.Variable(tf.random_normal([10, 1]), name = "weight4")
b4 = tf.Variable(tf.random_normal([1]), name = "bias4")
hypothesis = tf.sigmoid(tf.matmul(layer3, W4) + b4)
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
# accuracy computation: true if hypothesis > 0.5 else false
predicted = tf.cast(hypothesis > 0.5, dtype = tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype = tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10001):
        sess.run(
            train,
            feed_dict = {
                X: x_data,
                Y: y_data
            }
        )
        if step % 2000 == 0:
            print("\nstep: {step}\ncost: {cost}\nW:\n{W}".format(
                step = step,
                cost = sess.run(
                    cost,
                    feed_dict = {
                        X: x_data,
                        Y: y_data
                    }
                ),
                W = sess.run([W1, W2])
            ))
    print("\naccuracy report:")
    h, c, a = sess.run(
        [hypothesis, predicted, accuracy],
        feed_dict = {
            X: x_data,
            Y: y_data
        }
    )
    print("\nhypothesis:\n\n{hypothesis}\n\ncorrect (Y):\n\n{correct}\n\naccuracy: {accuracy}".format(
        hypothesis = h,
        correct = c,
        accuracy = a
    ))
import tensorflow as tf
import tensorflow.examples.tutorials.mnist

# Softmax regression on MNIST with TensorBoard instrumentation: name scopes
# group the graph visually, and summaries record training statistics to
# logs_path for inspection with `tensorboard --logdir`.
# reset everything to rerun in jupyter
tf.reset_default_graph()
# configuration
batch_size = 100
learning_rate = 0.5
training_epochs = 500
logs_path = "/tmp/mnist/1"
# load mnist data set
from tensorflow.examples.tutorials.mnist import input_data
mnist = tensorflow.examples.tutorials.mnist.input_data.read_data_sets(
    "MNIST_data/",
    one_hot = True
)
with tf.name_scope("input"):
    # None => batch size can be any size; 784 => flattened image
    x = tf.placeholder(tf.float32, shape = [None, 784], name = "x-input")
    # target 10 output classes
    y_ = tf.placeholder(tf.float32, shape = [None, 10], name = "y-input")
with tf.name_scope("weights"):
    W = tf.Variable(tf.zeros([784, 10]))
with tf.name_scope("biases"):
    b = tf.Variable(tf.zeros([10]))
with tf.name_scope("softmax"):
    y = tf.nn.softmax(tf.matmul(x, W) + b)
with tf.name_scope("cross-entropy"):
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), axis = 1))
# specify optimizer
with tf.name_scope("train"):
    # optimizer is an "operation" which we can execute in a session
    train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
with tf.name_scope("accuracy"):
    correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
# BUG FIX: x is a [None, 784] tensor, so tf.summary.scalar("input", x) raises
# an error at run time; record its distribution with a histogram summary.
tf.summary.histogram("input", x)
summary_operation = tf.summary.merge_all()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(logs_path)
    # perform training cycles
    for epoch in range(training_epochs):
        # number of batches in one epoch
        batch_count = int(mnist.train.num_examples / batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            _, summary = sess.run(
                [train_op, summary_operation],
                feed_dict = {x: batch_x, y_: batch_y}
            )
            writer.add_summary(summary, epoch * batch_count + i)
        if epoch % 100 == 0:
            print("epoch: {epoch}".format(epoch = epoch))
    print("accuracy: {accuracy}".format(
        accuracy = accuracy.eval(feed_dict = {
            x: mnist.test.images,
            y_: mnist.test.labels
        }
        )
    ))
import matplotlib.pyplot as plt
import random
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Softmax regression on MNIST trained with the Adam optimizer. Note that
# hypothesis holds raw logits: tf.nn.softmax_cross_entropy_with_logits
# applies the softmax internally.
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
# weights and bias for layers
W = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(X, W) + b
# define cost/loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = hypothesis, labels = Y))
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feed_dict = {X: batch_xs, Y: batch_ys}
        c, _ = sess.run([cost, optimizer], feed_dict = feed_dict)
        avg_cost += c / total_batch
    print("epoch: {epoch}, cost: {cost}".format(
        epoch = epoch,
        cost = avg_cost
    ))
# test accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("accuracy: {accuracy}".format(
    accuracy = sess.run(
        accuracy,
        feed_dict = {
            X: mnist.test.images,
            Y: mnist.test.labels
        }
    )
))
# select one test example and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("\nlabel:")
print(sess.run(
    tf.argmax(mnist.test.labels[r:r + 1], 1)
    )
)
print("\nprediction:")
print(sess.run(
    tf.argmax(hypothesis, 1),
    feed_dict = {X: mnist.test.images[r:r + 1]}
    )
)
plt.imshow(
    mnist.test.images[r:r + 1].reshape(28, 28),
    cmap = "Greys",
    interpolation = "nearest"
)
plt.show()
import random
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Three-layer fully-connected network (two ReLU hidden layers of 256 units)
# on MNIST, trained with the Adam optimizer.
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
# weights and bias for layers
W1 = tf.Variable(tf.random_normal([784, 256]))
b1 = tf.Variable(tf.random_normal([256]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
W2 = tf.Variable(tf.random_normal([256, 256]))
b2 = tf.Variable(tf.random_normal([256]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
W3 = tf.Variable(tf.random_normal([256, 10]))
b3 = tf.Variable(tf.random_normal([10]))
# Raw logits; the softmax is applied inside the loss below.
hypothesis = tf.matmul(L2, W3) + b3
# define cost/loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = hypothesis, labels = Y))
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feed_dict = {X: batch_xs, Y: batch_ys}
        c, _ = sess.run([cost, optimizer], feed_dict = feed_dict)
        avg_cost += c / total_batch
    print("epoch: {epoch}, cost: {cost}".format(
        epoch = epoch,
        cost = avg_cost
    ))
# test accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("accuracy: {accuracy}".format(
    accuracy = sess.run(
        accuracy,
        feed_dict = {
            X: mnist.test.images,
            Y: mnist.test.labels
        }
    )
))
# select one test example and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("\nlabel:")
print(sess.run(
    tf.argmax(mnist.test.labels[r:r + 1], 1)
    )
)
print("\nprediction:")
print(sess.run(
    tf.argmax(hypothesis, 1),
    feed_dict = {X: mnist.test.images[r:r + 1]}
    )
)
sess.close()
import random
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
# weights and bias for layers
W1 = tf.get_variable("W1", shape = [784, 256], initializer = tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([256]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
W2 = tf.get_variable("W2", shape = [256, 256], initializer = tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([256]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
W3 = tf.get_variable("W3", shape = [256, 10], initializer = tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L2, W3) + b3
# define cost/loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = hypothesis, labels = Y))
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(training_epochs):
avg_cost = 0
total_batch = int(mnist.train.num_examples / batch_size)
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
feed_dict = {X: batch_xs, Y: batch_ys}
c, _ = sess.run([cost, optimizer], feed_dict = feed_dict)
avg_cost += c / total_batch
print("epoch: {epoch}, cost: {cost}".format(
epoch = epoch,
cost = avg_cost
))
# test accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("accuracy: {accuracy}".format(
accuracy = sess.run(
accuracy,
feed_dict = {
X: mnist.test.images,
Y: mnist.test.labels
}
)
))
# select one test example and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("\nlabel:")
print(sess.run(
tf.argmax(mnist.test.labels[r:r + 1], 1)
)
)
print("\nprediction:")
print(sess.run(
tf.argmax(hypothesis, 1),
feed_dict = {X: mnist.test.images[r:r + 1]}
)
)
sess.close()
import random
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
# weights and bias for layers
W1 = tf.get_variable("W1", shape = [784, 512], initializer = tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([512]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
W2 = tf.get_variable("W2", shape = [512, 512], initializer = tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([512]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
W3 = tf.get_variable("W3", shape = [512, 512], initializer = tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([512]))
L3 = tf.nn.relu(tf.matmul(L2, W3) + b3)
W4 = tf.get_variable("W4", shape = [512, 512], initializer = tf.contrib.layers.xavier_initializer())
b4 = tf.Variable(tf.random_normal([512]))
L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
W5 = tf.get_variable("W5", shape = [512, 10], initializer = tf.contrib.layers.xavier_initializer())
b5 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L4, W5) + b5
# define cost/loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = hypothesis, labels = Y))
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(training_epochs):
avg_cost = 0
total_batch = int(mnist.train.num_examples / batch_size)
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
feed_dict = {X: batch_xs, Y: batch_ys}
c, _ = sess.run([cost, optimizer], feed_dict = feed_dict)
avg_cost += c / total_batch
print("epoch: {epoch}, cost: {cost}".format(
epoch = epoch,
cost = avg_cost
))
# test accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("accuracy: {accuracy}".format(
accuracy = sess.run(
accuracy,
feed_dict = {
X: mnist.test.images,
Y: mnist.test.labels
}
)
))
# select one test example and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("\nlabel:")
print(sess.run(
tf.argmax(mnist.test.labels[r:r + 1], 1)
)
)
print("\nprediction:")
print(sess.run(
tf.argmax(hypothesis, 1),
feed_dict = {X: mnist.test.images[r:r + 1]}
)
)
sess.close()
import random
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
# dropout (keep_prob) rate 0.7 on training, but should be 1 for testing
keep_prob = tf.placeholder(tf.float32)
# weights and bias for layers
W1 = tf.get_variable("W1", shape = [784, 512], initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([512]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
L1 = tf.nn.dropout(L1, keep_prob = keep_prob)
W2 = tf.get_variable("W2", shape = [512, 512], initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([512]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
L2 = tf.nn.dropout(L2, keep_prob = keep_prob)
W3 = tf.get_variable("W3", shape = [512, 512], initializer = tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([512]))
L3 = tf.nn.relu(tf.matmul(L2, W3) + b3)
L3 = tf.nn.dropout(L3, keep_prob = keep_prob)
W4 = tf.get_variable("W4", shape = [512, 512], initializer = tf.contrib.layers.xavier_initializer())
b4 = tf.Variable(tf.random_normal([512]))
L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
L4 = tf.nn.dropout(L4, keep_prob = keep_prob)
W5 = tf.get_variable("W5", shape = [512, 10], initializer = tf.contrib.layers.xavier_initializer())
b5 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L4, W5) + b5
# define cost/loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = hypothesis, labels = Y))
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(training_epochs):
avg_cost = 0
total_batch = int(mnist.train.num_examples / batch_size)
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
feed_dict = {X: batch_xs, Y: batch_ys, keep_prob: 0.7}
c, _ = sess.run([cost, optimizer], feed_dict = feed_dict)
avg_cost += c / total_batch
print("epoch: {epoch}, cost: {cost}".format(
epoch = epoch,
cost = avg_cost
))
# test accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("accuracy: {accuracy}".format(
accuracy = sess.run(
accuracy,
feed_dict = {
X: mnist.test.images,
Y: mnist.test.labels,
keep_prob: 1
}
)
))
# select one test example and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("\nlabel:")
print(sess.run(
tf.argmax(mnist.test.labels[r:r + 1], 1)
)
)
print("\nprediction:")
print(sess.run(
tf.argmax(hypothesis, 1),
feed_dict = {X: mnist.test.images[r:r + 1], keep_prob: 1}
)
)
sess.close()
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
sess = tf.InteractiveSession()
image = np.array(
[[
[[1], [2]],
[[2], [3]]
]],
dtype = np.float32
)
print(image.shape)
plt.imshow(image.reshape(2, 2), cmap = "Greys")
plt.show()
import matplotlib.pyplot as plt
import random
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
# input place holders
X = tf.placeholder(tf.float32, [None, 784])
X_img = tf.reshape(X, [-1, 28, 28, 1]) # 28x28x1 (black/white)
Y = tf.placeholder(tf.float32, [None, 10])
# L1 ImgIn shape = (?, 28, 28, 1)
W1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev = 0.01))
# Conv -> (?, 28, 28, 32)
# Pool -> (?, 14, 14, 32)
L1 = tf.nn.conv2d(X_img, W1, strides = [1, 1, 1, 1], padding = "SAME")
L1 = tf.nn.relu(L1)
L1 = tf.nn.max_pool(L1, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = "SAME")
Tensor("Conv2D:0", shape = (?, 28, 28, 32), dtype = float32)
Tensor("Relu:0", shape = (?, 28, 28, 32), dtype = float32)
Tensor("MaxPool:0", shape = (?, 14, 14, 32), dtype = float32)
# L2 ImgIn shape=(?, 14, 14, 32)
W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev = 0.01))
# Conv ->(?, 14, 14, 64)
# Pool ->(?, 7, 7, 64)
L2 = tf.nn.conv2d(L1, W2, strides = [1, 1, 1, 1], padding = "SAME")
L2 = tf.nn.relu(L2)
L2 = tf.nn.max_pool(L2, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = "SAME")
L2 = tf.reshape(L2, [-1, 7 * 7 * 64])
Tensor("Conv2D_1:0", shape = (?, 14, 14, 64), dtype = float32)
Tensor("Relu_1:0", shape = (?, 14, 14, 64), dtype = float32)
Tensor("MaxPool_1:0", shape = (?, 7, 7, 64), dtype = float32)
Tensor("Reshape_1:0", shape = (?, 3136), dtype = float32)
# final FC 7x7x64 inputs -> 10 outputs
W3 = tf.get_variable("W3", shape=[7 * 7 * 64, 10], initializer = tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L2, W3) + b
# define cost/loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = hypothesis, labels = Y))
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(training_epochs):
avg_cost = 0
total_batch = int(mnist.train.num_examples / batch_size)
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
feed_dict = {X: batch_xs, Y: batch_ys}
c, _ = sess.run([cost, optimizer], feed_dict = feed_dict)
avg_cost += c / total_batch
print("epoch: {epoch}, cost: {cost}".format(
epoch = epoch,
cost = avg_cost
))
# test accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("accuracy: {accuracy}".format(
accuracy = sess.run(
accuracy,
feed_dict = {
X: mnist.test.images,
Y: mnist.test.labels
}
)
))
# select one test example and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("\nlabel:")
print(sess.run(
tf.argmax(mnist.test.labels[r:r + 1], 1)
)
)
print("\nprediction:")
print(sess.run(
tf.argmax(hypothesis, 1),
feed_dict = {X: mnist.test.images[r:r + 1]}
)
)
sess.close()
plt.imshow(
mnist.test.images[r:r + 1].
reshape(28, 28),
cmap = "Greys",
interpolation = "nearest"
)
plt.show()
import numpy as np
import skopt
def f(x):
return (np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2)) * np.random.randn() * 0.1)
result = skopt.gp_minimize(f, [(-2.0, 2.0)])
result["fun"]
result.keys()
from sklearn.datasets import load_boston
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import cross_val_score
from skopt import gp_minimize
boston = load_boston()
X, y = boston.data, boston.target
n_features = X.shape[1]
model = GradientBoostingRegressor(
n_estimators = 50,
random_state = 0
)
def objective(parameters):
max_depth, learning_rate, max_features, min_samples_split, min_samples_leaf = parameters
model.set_params(
max_depth = max_depth,
learning_rate = learning_rate,
max_features = max_features,
min_samples_split = min_samples_split,
min_samples_leaf = min_samples_leaf
)
return -np.mean(
cross_val_score(
model,
X,
y,
cv = 5,
n_jobs = -1,
scoring = "neg_mean_absolute_error"
)
)
# bounds of the dimensions of the search space to explore
space = [
(1, 5), # max_depth
(10 ** -5, 10 ** 0, "log-uniform"), # learning_rate
(1, n_features), # max_features
(2, 100), # min_samples_split
(1, 100) # min_samples_leaf
]
# sequential model-based optimisation
result = gp_minimize(
objective,
space,
n_calls = 100,
random_state = 0
)
print(
best score: {best_score}
best parameters:
- max_depth: {max_depth}
- learning_rate: {learning_rate}
- max_features: {max_features}
- min_samples_split: {min_samples_split}
- min_samples_leaf: {min_samples_leaf}
.format(
best_score = result["fun"],
max_depth = result["x"][0],
learning_rate = result["x"][1],
max_features = result["x"][2],
min_samples_split = result["x"][3],
min_samples_leaf = result["x"][4]
))
import matplotlib.pyplot as plt
import seaborn as sns
from skopt.plots import plot_convergence
%matplotlib inline
sns.set(context = "paper", font = "monospace")
plot_convergence(result)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Type
Step7: 1.4. Elemental Stoichiometry
Step8: 1.5. Elemental Stoichiometry Details
Step9: 1.6. Prognostic Variables
Step10: 1.7. Diagnostic Variables
Step11: 1.8. Damping
Step12: 2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
Step13: 2.2. Timestep If Not From Ocean
Step14: 3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
Step15: 3.2. Timestep If Not From Ocean
Step16: 4. Key Properties --> Transport Scheme
Step17: 4.2. Scheme
Step18: 4.3. Use Different Scheme
Step19: 5. Key Properties --> Boundary Forcing
Step20: 5.2. River Input
Step21: 5.3. Sediments From Boundary Conditions
Step22: 5.4. Sediments From Explicit Model
Step23: 6. Key Properties --> Gas Exchange
Step24: 6.2. CO2 Exchange Type
Step25: 6.3. O2 Exchange Present
Step26: 6.4. O2 Exchange Type
Step27: 6.5. DMS Exchange Present
Step28: 6.6. DMS Exchange Type
Step29: 6.7. N2 Exchange Present
Step30: 6.8. N2 Exchange Type
Step31: 6.9. N2O Exchange Present
Step32: 6.10. N2O Exchange Type
Step33: 6.11. CFC11 Exchange Present
Step34: 6.12. CFC11 Exchange Type
Step35: 6.13. CFC12 Exchange Present
Step36: 6.14. CFC12 Exchange Type
Step37: 6.15. SF6 Exchange Present
Step38: 6.16. SF6 Exchange Type
Step39: 6.17. 13CO2 Exchange Present
Step40: 6.18. 13CO2 Exchange Type
Step41: 6.19. 14CO2 Exchange Present
Step42: 6.20. 14CO2 Exchange Type
Step43: 6.21. Other Gases
Step44: 7. Key Properties --> Carbon Chemistry
Step45: 7.2. PH Scale
Step46: 7.3. Constants If Not OMIP
Step47: 8. Tracers
Step48: 8.2. Sulfur Cycle Present
Step49: 8.3. Nutrients Present
Step50: 8.4. Nitrous Species If N
Step51: 8.5. Nitrous Processes If N
Step52: 9. Tracers --> Ecosystem
Step53: 9.2. Upper Trophic Levels Treatment
Step54: 10. Tracers --> Ecosystem --> Phytoplankton
Step55: 10.2. Pft
Step56: 10.3. Size Classes
Step57: 11. Tracers --> Ecosystem --> Zooplankton
Step58: 11.2. Size Classes
Step59: 12. Tracers --> Disolved Organic Matter
Step60: 12.2. Lability
Step61: 13. Tracers --> Particules
Step62: 13.2. Types If Prognostic
Step63: 13.3. Size If Prognostic
Step64: 13.4. Size If Discrete
Step65: 13.5. Sinking Speed If Prognostic
Step66: 14. Tracers --> Dic Alkalinity
Step67: 14.2. Abiotic Carbon
Step68: 14.3. Alkalinity
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'test-institute-2', 'sandbox-2', 'ocnbgchem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Geochemical"
# "NPZD"
# "PFT"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Fixed"
# "Variable"
# "Mix of both"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.damping')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Use that of ocean model"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Atmospheric Chemistry model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Land Surface model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other protocol"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea water"
# "Free"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrogen (N)"
# "Phosphorous (P)"
# "Silicium (S)"
# "Iron (Fe)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrates (NO3)"
# "Amonium (NH4)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dentrification"
# "N fixation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "PFT including size based (specify both below)"
# "Size based only (specify below)"
# "PFT only (specify below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diatoms"
# "Nfixers"
# "Calcifiers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microphytoplankton"
# "Nanophytoplankton"
# "Picophytoplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "Size based (specify below)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microzooplankton"
# "Mesozooplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Labile"
# "Semi-labile"
# "Refractory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diagnostic"
# "Diagnostic (Martin profile)"
# "Diagnostic (Balast)"
# "Prognostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "POC"
# "PIC (calcite)"
# "PIC (aragonite"
# "BSi"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No size spectrum used"
# "Full size spectrum"
# "Discrete size classes (specify which below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Function of particule size"
# "Function of particule type (balast)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "C13"
# "C14)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Prognostic"
# "Diagnostic)"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput

# DO NOT EDIT !
# Initialise the ES-DOC output document for the MIROC-ES2L "toplevel" realm.
DOC = NotebookOutput('cmip6', 'miroc', 'miroc-es2l', 'toplevel')

# Document authorship / contributor metadata.
# Set as follows: DOC.set_author("name", "email")
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)

# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)

# Every property documented below shares the 'cmip6.toplevel.' prefix.  The
# auto-generated notebook registered each one with an individual, hand-pasted
# DOC.set_id(...) call; the registration is data-driven here, preserving the
# exact specification order of the original calls.  For each property a value
# still needs to be supplied via DOC.set_value(...) directly after its
# set_id(...) call; the permitted values per property (e.g. the
# "N/A"/"M"/"Y"/"E"/"ES"/"C" provision choices, volcanic implementation
# "Type A".."Type E", coupler choices, booleans) are listed in the CMIP6
# ES-DOC toplevel specification.
_TOPLEVEL_PROPERTIES = (
    # 1-8. Key properties: overview, genealogy, software
    'key_properties.model_overview',
    'key_properties.model_name',
    'key_properties.flux_correction.details',
    'key_properties.genealogy.year_released',
    'key_properties.genealogy.CMIP3_parent',
    'key_properties.genealogy.CMIP5_parent',
    'key_properties.genealogy.previous_name',
    'key_properties.software_properties.repository',
    'key_properties.software_properties.code_version',
    'key_properties.software_properties.code_languages',
    'key_properties.software_properties.components_structure',
    'key_properties.software_properties.coupler',
    # 9. Coupling
    'key_properties.coupling.overview',
    'key_properties.coupling.atmosphere_double_flux',
    'key_properties.coupling.atmosphere_fluxes_calculation_grid',
    'key_properties.coupling.atmosphere_relative_winds',
    # 10. Tuning applied
    'key_properties.tuning_applied.description',
    'key_properties.tuning_applied.global_mean_metrics_used',
    'key_properties.tuning_applied.regional_metrics_used',
    'key_properties.tuning_applied.trend_metrics_used',
    'key_properties.tuning_applied.energy_balance',
    'key_properties.tuning_applied.fresh_water_balance',
    # 11. Conservation: heat
    'key_properties.conservation.heat.global',
    'key_properties.conservation.heat.atmos_ocean_interface',
    'key_properties.conservation.heat.atmos_land_interface',
    'key_properties.conservation.heat.atmos_sea-ice_interface',
    'key_properties.conservation.heat.ocean_seaice_interface',
    'key_properties.conservation.heat.land_ocean_interface',
    # 12. Conservation: fresh water
    'key_properties.conservation.fresh_water.global',
    'key_properties.conservation.fresh_water.atmos_ocean_interface',
    'key_properties.conservation.fresh_water.atmos_land_interface',
    'key_properties.conservation.fresh_water.atmos_sea-ice_interface',
    'key_properties.conservation.fresh_water.ocean_seaice_interface',
    'key_properties.conservation.fresh_water.runoff',
    'key_properties.conservation.fresh_water.iceberg_calving',
    'key_properties.conservation.fresh_water.endoreic_basins',
    'key_properties.conservation.fresh_water.snow_accumulation',
    # 13. Conservation: salt / momentum
    'key_properties.conservation.salt.ocean_seaice_interface',
    'key_properties.conservation.momentum.details',
    # 14. Radiative forcings: greenhouse gases
    'radiative_forcings.overview',
    'radiative_forcings.greenhouse_gases.CO2.provision',
    'radiative_forcings.greenhouse_gases.CO2.additional_information',
    'radiative_forcings.greenhouse_gases.CH4.provision',
    'radiative_forcings.greenhouse_gases.CH4.additional_information',
    'radiative_forcings.greenhouse_gases.N2O.provision',
    'radiative_forcings.greenhouse_gases.N2O.additional_information',
    'radiative_forcings.greenhouse_gases.tropospheric_O3.provision',
    'radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information',
    'radiative_forcings.greenhouse_gases.stratospheric_O3.provision',
    'radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information',
    'radiative_forcings.greenhouse_gases.CFC.provision',
    'radiative_forcings.greenhouse_gases.CFC.equivalence_concentration',
    'radiative_forcings.greenhouse_gases.CFC.additional_information',
    # 18-27. Radiative forcings: aerosols
    'radiative_forcings.aerosols.SO4.provision',
    'radiative_forcings.aerosols.SO4.additional_information',
    'radiative_forcings.aerosols.black_carbon.provision',
    'radiative_forcings.aerosols.black_carbon.additional_information',
    'radiative_forcings.aerosols.organic_carbon.provision',
    'radiative_forcings.aerosols.organic_carbon.additional_information',
    'radiative_forcings.aerosols.nitrate.provision',
    'radiative_forcings.aerosols.nitrate.additional_information',
    'radiative_forcings.aerosols.cloud_albedo_effect.provision',
    'radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds',
    'radiative_forcings.aerosols.cloud_albedo_effect.additional_information',
    'radiative_forcings.aerosols.cloud_lifetime_effect.provision',
    'radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds',
    'radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only',
    'radiative_forcings.aerosols.cloud_lifetime_effect.additional_information',
    'radiative_forcings.aerosols.dust.provision',
    'radiative_forcings.aerosols.dust.additional_information',
    'radiative_forcings.aerosols.tropospheric_volcanic.provision',
    'radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation',
    'radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation',
    'radiative_forcings.aerosols.tropospheric_volcanic.additional_information',
    'radiative_forcings.aerosols.stratospheric_volcanic.provision',
    'radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation',
    'radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation',
    'radiative_forcings.aerosols.stratospheric_volcanic.additional_information',
    'radiative_forcings.aerosols.sea_salt.provision',
    'radiative_forcings.aerosols.sea_salt.additional_information',
    # 28-29. Radiative forcings: other (land use, solar)
    'radiative_forcings.other.land_use.provision',
    'radiative_forcings.other.land_use.crop_change_only',
    'radiative_forcings.other.land_use.additional_information',
    'radiative_forcings.other.solar.provision',
    'radiative_forcings.other.solar.additional_information',
)

for _suffix in _TOPLEVEL_PROPERTIES:
    # PROPERTY ID - DO NOT EDIT !
    DOC.set_id('cmip6.toplevel.' + _suffix)
    # PROPERTY VALUE:
    # Set as follows: DOC.set_value(...)
    # TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate data
Step2: EM Algorithm
Step3: Division by zero should be avoided
Step4: Local Minima problem
Step5: What if we minimize the perpendicular distance to the line
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Synthetic data: sample n noisy observations from each of two straight
# lines; each weight vector is [intercept, slope].
n = 20 # number of datapoints in each line
v1 = np.array([-13, 0.9]) # first line: intercept -13, slope 0.9
v2 = np.array([7, -1]) # second line: intercept 7, slope -1
sig = 1.0
# x-coordinates 1..n (shared by both lines)
seq = np.array(range(n))+1
x = np.transpose(np.array([np.ones(n), seq])) # Half of Design Matrix as there are two y's for each x
line1 = np.zeros(n)
line2 = np.zeros(n)
# For each x, draw one Gaussian sample around each line's value.
for i in range(n):
    line1[i] = np.random.normal(np.dot(v1, x[i]), sig)
    line2[i] = np.random.normal(np.dot(v2, x[i]), sig)
# Red dots: first line's samples; blue crosses: second line's samples.
plt.plot(seq, line1, 'ro')
plt.plot(seq, line2, 'b+')
plt.show()
# Constants
N = 2*n # total number of data points
X = np.vstack((x, x)) # Design Matrix;
Y = np.concatenate((line1, line2)) # Target vector;
# Initialize the parameters
p1 = np.random.rand()
p2 = 1-p1
w1 = np.array([np.random.normal(0, 5), np.random.normal(0,2)])
w2 = np.array([np.random.normal(0, 5), np.random.normal(0,2)])
# Press ^Enter to run each iteration
# E Step: compute gammas (dividing directly causes 0 by 0 division. Go log)
p_ = -0.5/(sig*sig) * (np.dot(X, w1) - Y)**2
q_ = -0.5/(sig*sig) * (np.dot(X, w2) - Y)**2
g1 = np.exp(-np.log(1 + np.exp(p_ - q_)*p2/p1))
g2 = 1-g1 # np.exp(-np.log(1 + np.exp(q_ - p_)*p1/p2))
# M Step: recompute pi and w
p1 = np.sum(g1)/N
p2 = np.sum(g2)/N
assert (np.abs(p1+p2-1) < 0.000001), "Not normalized - sad face puppy face"
Z1 = np.diag(g1)
Z2 = np.diag(g2)
T1 = np.dot(np.transpose(X), Z1) # X'*Z1
T2 = np.dot(np.transpose(X), Z2) # X'*Z2
w1 = np.dot(np.linalg.pinv(np.dot(T1, X)), np.dot(T1, Y))
w2 = np.dot(np.linalg.pinv(np.dot(T2, X)), np.dot(T2, Y))
print w1, w2, p1, p2
plt.plot(seq, line1, 'ro')
plt.plot(seq, line2, 'b+')
plt.plot(seq, np.dot(x, w1))
plt.plot(seq, np.dot(x, w2))
plt.show()
def closer_to_first(d, w1, w2):
    """Return 1 if datapoint d = (features, y) has a smaller squared
    vertical residual under line w1 than under line w2, else 0."""
    features, target = d[0], d[1]
    err1 = (np.dot(w1, features) - target) ** 2
    err2 = (np.dot(w2, features) - target) ** 2
    if err1 <= err2:
        return 1
    return 0
# Hard-assignment variant (k-means style): each point belongs entirely to
# its closest line, instead of the soft responsibilities of the EM version.
# Initialization
w1 = np.array([np.random.normal(0, 5), np.random.normal(0,2)])
w2 = np.array([np.random.normal(0, 5), np.random.normal(0,2)])
# Assign each point to the nearest line (1 if closer to w1, else 0)
z1 = np.array([closer_to_first(d, w1, w2) for d in zip(X, Y)])
Z1 = np.diag(z1)
Z2 = np.diag(1-z1)
# recompute the equations of lines - same as M step in EM algorithm
T1 = np.dot(np.transpose(X), Z1) # X'*Z1
T2 = np.dot(np.transpose(X), Z2) # X'*Z2
w1 = np.dot(np.linalg.pinv(np.dot(T1, X)), np.dot(T1, Y))
w2 = np.dot(np.linalg.pinv(np.dot(T2, X)), np.dot(T2, Y))
print w1, w2
# Overlay the two fitted lines on the data.
plt.plot(seq, line1, 'ro')
plt.plot(seq, line2, 'b+')
plt.plot(seq, np.dot(x, w1))
plt.plot(seq, np.dot(x, w2))
plt.show()
def perpend_to_first(d, w1, w2):
    """Return 1 when datapoint d = (features, y) is perpendicularly closer
    to the line described by w1 = [intercept, slope] than to the line
    described by w2, else 0.

    The vertical residual |w.x - y| is converted to a perpendicular
    distance by dividing by sqrt(1 + slope**2).
    """
    features, target = d[0], d[1]
    dist1 = np.abs(np.dot(w1, features) - target) / np.linalg.norm([1, w1[1]])
    dist2 = np.abs(np.dot(w2, features) - target) / np.linalg.norm([1, w2[1]])
    return 1 if dist1 <= dist2 else 0
# Same hard-assignment loop, but using perpendicular (orthogonal) distance
# to decide which line each point belongs to.
# Initialization
w1 = np.array([np.random.normal(0, 5), np.random.normal(0,2)])
w2 = np.array([np.random.normal(0, 5), np.random.normal(0,2)])
# Assign each point to the nearest line (by perpendicular distance)
z1 = np.array([perpend_to_first(d, w1, w2) for d in zip(X, Y)])
Z1 = np.diag(z1)
Z2 = np.diag(1-z1)
# recompute the equations of lines - same as M step in EM algorithm
T1 = np.dot(np.transpose(X), Z1) # X'*Z1
T2 = np.dot(np.transpose(X), Z2) # X'*Z2
w1 = np.dot(np.linalg.pinv(np.dot(T1, X)), np.dot(T1, Y))
w2 = np.dot(np.linalg.pinv(np.dot(T2, X)), np.dot(T2, Y))
print w1, w2
# Overlay the two fitted lines on the data.
plt.plot(seq, line1, 'ro')
plt.plot(seq, line2, 'b+')
plt.plot(seq, np.dot(x, w1))
plt.plot(seq, np.dot(x, w2))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Python versions
Step2: Booleans
Step3: more string methods can be found here
Step4: List comprehensions
Step5: You can make this code simpler using a list comprehension
Step6: dictionaries
Step7: Tuples
Step8: Functions
Step9: Classes
Step10: Numpy
Step11: Array indexing
Step12: <b>Integer array indexing</b>
Step13: One useful trick with integer array indexing is selecting or mutating one element from each row of a matrix
Step14: <b>Boolean array indexing</b>
Step15: Datatypes
Step16: Array math
Step17: Note that unlike MATLAB, * is elementwise multiplication, not matrix multiplication. We instead use the dot function to compute inner products of vectors, to multiply a vector by a matrix, and to multiply matrices. dot is available both as a function in the numpy module and as an instance method of array objects
Step18: Numpy provides many useful functions for performing computations on arrays; one of the most useful is sum
Step19: Apart from computing mathematical functions using arrays, we frequently need to reshape or otherwise manipulate data in arrays. The simplest example of this type of operation is transposing a matrix; to transpose a matrix, simply use the T attribute of an array object
Step20: Broadcasting
Step21: The line y = x + v works even though x has shape (4, 3) and v has shape (3,) due to broadcasting; this line works as if v actually had shape (4, 3), where each row was a copy of v, and the sum was performed elementwise.
Step22: SciPy
Step23: MATLAB files
Step24: Matplotlib
Step25: Subplots
|
<ASSISTANT_TASK:>
Python Code:
def quicksort(arr):
    """Sort a list of mutually comparable items with a functional quicksort.

    Returns a new sorted list; *arr* itself is not modified.
    """
    if len(arr) <= 1:
        return arr
    # BUGFIX/compat: floor division keeps the pivot index an int on
    # Python 3 ("/" yields a float there -> TypeError); on Python 2
    # integer "/" already floored, so behaviour is unchanged.
    pivot = arr[len(arr) // 2]
    left = [x for x in arr if x < pivot]
    middle = [x for x in arr if x == pivot]
    right = [x for x in arr if x > pivot]
    return quicksort(left) + middle + quicksort(right)
print quicksort([3,6,8,10,1,2,1])
## Numbers (note: Python 2 syntax throughout -- print statements)
x = 3
print type(x) # Prints "<type 'int'>"
print x # Prints "3"
print x + 1 # Addition; prints "4"
print x - 1 # Subtraction; prints "2"
print x * 2 # Multiplication; prints "6"
print x ** 2 # Exponentiation; prints "9"
x += 1
print x # Prints "4"
x *= 2
print x # Prints "8"
y = 2.5
print type(y) # Prints "<type 'float'>"
print y, y + 1, y * 2, y ** 2 # Prints "2.5 3.5 5.0 6.25"
## Booleans: logical operators are English words, not &&/||/!
t = True
f = False
print type(t) # Prints "<type 'bool'>"
print t and f # Logical AND; prints "False"
print t or f # Logical OR; prints "True"
print not t # Logical NOT; prints "False"
print t != f # Logical XOR; prints "True"
# python has great support for strings
hello = 'hello' # String literals can use single quotes
world = "world" # or double quotes; it does not matter.
print hello # Prints "hello"
print len(hello) # String length; prints "5"
hw = hello + ' ' + world # String concatenation
print hw # prints "hello world"
hw12 = '%s %s %d' % (hello, world, 12) # sprintf style string formatting
print hw12 # prints "hello world 12"
# String objects have a bunch of useful methods; for example:
s = "hello"
print s.capitalize() # Capitalize a string; prints "Hello"
print s.upper() # Convert a string to uppercase; prints "HELLO"
print s.rjust(7) # Right-justify a string, padding with spaces; prints " hello"
print s.center(7) # Center a string, padding with spaces; prints " hello "
print s.replace('l', '(ell)') # Replace all instances of one substring with another;
# prints "he(ell)(ell)o"
print ' world '.strip() # Strip leading and trailing whitespace; prints "world"
# A list is the Python equivalent of an array, but is resizeable and can contain
# elements of different types:
xs = [3, 1, 2] # Create a list
print xs, xs[2] # Prints "[3, 1, 2] 2"
print xs[-1] # Negative indices count from the end of the list; prints "2"
xs[2] = 'foo' # Lists can contain elements of different types
print xs # Prints "[3, 1, 'foo']"
xs.append('bar') # Add a new element to the end of the list
print xs # Prints "[3, 1, 'foo', 'bar']"
x = xs.pop() # Remove and return the last element of the list
print x, xs # Prints "bar [3, 1, 'foo']"
## Slicing: concise syntax for sublists
nums = range(5) # range is a built-in function that creates a list of integers
print nums # Prints "[0, 1, 2, 3, 4]"
print nums[2:4] # Get a slice from index 2 to 4 (exclusive); prints "[2, 3]"
print nums[2:] # Get a slice from index 2 to the end; prints "[2, 3, 4]"
print nums[:2] # Get a slice from the start to index 2 (exclusive); prints "[0, 1]"
print nums[:] # Get a slice of the whole list; prints ["0, 1, 2, 3, 4]"
print nums[:-1] # Slice indices can be negative; prints ["0, 1, 2, 3]"
nums[2:4] = [8, 9] # Assign a new sublist to a slice
print nums # Prints "[0, 1, 8, 9, 4]"
## Loops over lists
animals = ['cat', 'dog', 'monkey']
for animal in animals:
    print animal
# Prints "cat", "dog", "monkey", each on its own line.
nums = [0, 1, 2, 3, 4]
squares = []
for x in nums:
    squares.append(x ** 2)
print squares # Prints [0, 1, 4, 9, 16]
# list comprehension squares
[x ** 2 for x in nums]
# even squares - list comprehension also take conditions
[x ** 2 for x in nums if x%2==0]
## Dictionaries: (key, value) mappings
d = {'cat': 'cute', 'dog': 'furry'} # Create a new dictionary with some data
print d['cat'] # Get an entry from a dictionary; prints "cute"
print 'cat' in d # Check if a dictionary has a given key; prints "True"
d['fish'] = 'wet' # Set an entry in a dictionary
print d['fish'] # Prints "wet"
# print d['monkey'] # KeyError: 'monkey' not a key of d
print d.get('monkey', 'N/A') # Get an element with a default; prints "N/A"
print d.get('fish', 'N/A') # Get an element with a default; prints "wet"
del d['fish'] # Remove an element from a dictionary
print d.get('fish', 'N/A') # "fish" is no longer a key; prints "N/A"
# Iterating a dict yields its keys (in arbitrary order):
d = {'person': 2, 'cat': 4, 'spider': 8}
for animal in d:
    legs = d[animal]
    print 'A %s has %d legs' % (animal, legs)
# Prints "A person has 2 legs", "A spider has 8 legs", "A cat has 4 legs"
# iteritems (Python 2 only; use items() in Python 3) yields (key, value) pairs:
d = {'person': 2, 'cat': 4, 'spider': 8}
for animal, legs in d.iteritems():
    print 'A %s has %d legs' % (animal, legs)
# Prints "A person has 2 legs", "A spider has 8 legs", "A cat has 4 legs"
# Dictionary comprehensions work like list comprehensions:
nums = [0, 1, 2, 3, 4]
even_num_to_square = {x:x**2 for x in nums if x %2 == 0}
print even_num_to_square # Prints "{0: 0, 2: 4, 4: 16}"
## Sets: unordered collections of distinct elements
animals = {'cat', 'dog'}
print 'cat' in animals # Check if an element is in a set; prints "True"
print 'fish' in animals # prints "False"
animals.add('fish') # Add an element to a set
print 'fish' in animals # Prints "True"
print len(animals) # Number of elements in a set; prints "3"
animals.add('cat') # Adding an element that is already in the set does nothing
print len(animals) # Prints "3"
animals.remove('cat') # Remove an element from a set
print len(animals) # Prints "2"
# Iteration order over a set is arbitrary:
animals = {'cat', 'dog', 'fish'}
for idx, animal in enumerate(animals):
    print '#%d: %s' % (idx + 1, animal)
# Prints "#1: fish", "#2: dog", "#3: cat"
# Set comprehensions:
from math import sqrt
nums = {int(sqrt(x)) for x in range(30)}
print nums # Prints "set([0, 1, 2, 3, 4, 5])"
## Tuples: immutable ordered lists; usable as dict keys / set elements
d = {(x, x + 1): x for x in range(10)} # Create a dictionary with tuple keys
t = (5, 6) # Create a tuple
print type(t) # Prints "<type 'tuple'>"
print d[t] # Prints "5"
print d[(1, 2)] # Prints "1"
def sign(x):
    """Classify the number *x* as 'positive', 'negative' or 'zero'."""
    if x > 0:
        return 'positive'
    if x < 0:
        return 'negative'
    return 'zero'
# Demonstrate sign() on one value of each class.
for x in [-1, 0, 1]:
    print sign(x)
# Prints "negative", "zero", "positive"
def hello(name, loud=False):
    """Print a greeting for *name*; upper-case and shout it when *loud*.

    COMPAT FIX: uses parenthesised single-argument print(...) calls,
    which behave identically on Python 2 and Python 3; the original
    "print 'x'" statement form is a SyntaxError under Python 3.
    """
    if loud:
        print('HELLO, %s!' % name.upper())
    else:
        print('Hello, %s' % name)
# Call hello() with and without the optional keyword argument.
hello('Bob') # Prints "Hello, Bob"
hello('Fred', loud=True) # Prints "HELLO, FRED!"
class Greeter(object):
    """Toy class demonstrating constructors and instance methods.

    COMPAT FIX: greet() uses parenthesised single-argument print(...)
    calls, which produce the same output on Python 2 and Python 3; the
    original "print 'x'" statements are a SyntaxError under Python 3.
    """

    # Constructor
    def __init__(self, name):
        self.name = name # Create an instance variable

    # Instance method
    def greet(self, loud=False):
        """Print a greeting; shout it when *loud* is true."""
        if loud:
            print('HELLO, %s!' % self.name.upper())
        else:
            print('Hello, %s' % self.name)
# Instantiate the class and call its method both ways.
g = Greeter('Fred') # Construct an instance of the Greeter class
g.greet() # Call an instance method; prints "Hello, Fred"
g.greet(loud=True) # Call an instance method; prints "HELLO, FRED!"
import numpy as np
a = np.array([1,2,3])
print type(a)
print a.shape
print a[0], a[1], a[2]
a[0] = 5
print a
b = np.array([[1,2,3],[4,5,6]])
print b.shape
print b[0,0], b[0,1], b[1,0]
#Numpy also provides many functions to create arrays:
a = np.zeros((2,2))
print a
b = np.ones((1,2))
print b
c = np.full((2,2),7) # Create a constant array
print c
d = np.eye(2) # Create a 2x2 identity matrix
print d
e = np.random.random((2,2)) #create an array filled with random values
print e
import numpy as np
# Create the following rank 2 array with shape (3, 4)
# [[ 1 2 3 4]
# [ 5 6 7 8]
# [ 9 10 11 12]]
a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
# Use slicing to pull out the subarray consisting of the first 2 rows
# and columns 1 and 2; b is the following array of shape (2, 2):
# [[2 3]
# [6 7]]
b = a[:2, 1:3]
print b
print a
# A slice of an array is a view into the same data, so modifying it
# will modify the original array.
print a[0, 1] # Prints "2"
b[0, 0] = 77 # b[0, 0] is the same piece of data as a[0, 1]
print a[0, 1] # Prints "77"
import numpy as np
# Create the following rank 2 array with shape (3, 4)
# [[ 1 2 3 4]
# [ 5 6 7 8]
# [ 9 10 11 12]]
a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
row_r1 = a[1, :]
row_r2 = a[1:2, :]
# Two ways of accessing the data in the middle row of the array.
# Mixing integer indexing with slices yields an array of lower rank,
# while using only slices yields an array of the same rank as the
# original array:
row_r1 = a[1, :] # Rank 1 view of the second row of a
row_r2 = a[1:2, :] # Rank 2 view of the second row of a
print row_r1, row_r1.shape # Prints "[5 6 7 8] (4,)"
print row_r2, row_r2.shape # Prints "[[5 6 7 8]] (1, 4)"
# We can make the same distinction when accessing columns of an array:
col_r1 = a[:, 1]
col_r2 = a[:, 1:2]
print col_r1, col_r1.shape # Prints "[ 2 6 10] (3,)"
print col_r2, col_r2.shape # Prints "[[ 2]
# [ 6]
# [10]] (3, 1)"
import numpy as np
a = np.array([[1,2], [3, 4], [5, 6]])
# An example of integer array indexing.
# The returned array will have shape (3,) and
print a[[0, 1, 2], [0, 1, 0]] # Prints "[1 4 5]"
# The above example of integer array indexing is equivalent to this:
print np.array([a[0, 0], a[1, 1], a[2, 0]]) # Prints "[1 4 5]"
# When using integer array indexing, you can reuse the same
# element from the source array:
print a[[0, 0], [1, 1]] # Prints "[2 2]"
# Equivalent to the previous integer array indexing example
print np.array([a[0, 1], a[0, 1]]) # Prints "[2 2]"
import numpy as np
print np.arange(4)
# Create a new array from which we will select elements
a = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
print a # prints "array([[ 1, 2, 3],
# [ 4, 5, 6],
# [ 7, 8, 9],
# [10, 11, 12]])"
# Create an array of indices
b = np.array([0, 2, 0, 1])
# Select one element from each row of a using the indices in b
print a[np.arange(4), b] # Prints "[ 1 6 7 11]"
# Mutate one element from each row of a using the indices in b
a[np.arange(4), b] += 10
print a # prints "array([[11, 2, 3],
# [ 4, 5, 16],
# [17, 8, 9],
# [10, 21, 12]])
import numpy as np
a = np.array([[1,2], [3, 4], [5, 6]])
bool_idx = (a > 2) # Find the elements of a that are bigger than 2;
# this returns a numpy array of Booleans of the same
# shape as a, where each slot of bool_idx tells
# whether that element of a is > 2.
print bool_idx # Prints "[[False False]
# [ True True]
# [ True True]]"
# We use boolean array indexing to construct a rank 1 array
# consisting of the elements of a corresponding to the True values
# of bool_idx
print a[bool_idx] # Prints "[3 4 5 6]"
# We can do all of the above in a single concise statement:
print a[a > 2] # Prints "[3 4 5 6]"
import numpy as np
x = np.array([1, 2]) # Let numpy choose the datatype
print x.dtype # Prints "int64"
x = np.array([1.0, 2.0]) # Let numpy choose the datatype
print x.dtype # Prints "float64"
x = np.array([1, 2], dtype=np.int64) # Force a particular datatype
print x.dtype # Prints "int64"
import numpy as np
x = np.array([[1,2],[3,4]], dtype=np.float64)
y = np.array([[5,6],[7,8]], dtype=np.float64)
print x
print y
# Elementwise sum; both produce the array
# [[ 6.0 8.0]
# [10.0 12.0]]
print x + y
print np.add(x, y)
# Elementwise difference; both produce the array
# [[-4.0 -4.0]
# [-4.0 -4.0]]
print x - y
print np.subtract(x, y)
# Elementwise product; both produce the array
# [[ 5.0 12.0]
# [21.0 32.0]]
print x * y
print np.multiply(x, y)
# Elementwise division; both produce the array
# [[ 0.2 0.33333333]
# [ 0.42857143 0.5 ]]
print x / y
print np.divide(x, y)
# Elementwise square root; produces the array
# [[ 1. 1.41421356]
# [ 1.73205081 2. ]]
print np.sqrt(x)
import numpy as np
x = np.array([[1,2],[3,4]])
y = np.array([[5,6],[7,8]])
v = np.array([9,10])
w = np.array([11, 12])
# Inner product of vectors; both produce 219
print v.dot(w)
print np.dot(v, w)
# Matrix / vector product; both produce the rank 1 array [29 67]
print x.dot(v)
print np.dot(x, v)
# Matrix / matrix product; both produce the rank 2 array
# [[19 22]
# [43 50]]
print x.dot(y)
print np.dot(x, y)
import numpy as np
x = np.array([[1,2],[3,4]])
print np.sum(x) # Compute sum of all elements; prints "10"
print np.sum(x, axis=0) # Compute sum of each column; prints "[4 6]"
print np.sum(x, axis=1) # Compute sum of each row; prints "[3 7]"
import numpy as np
x = np.array([[1,2], [3,4]])
print x # Prints "[[1 2]
# [3 4]]"
print x.T # Prints "[[1 3]
# [2 4]]"
# Note that taking the transpose of a rank 1 array does nothing:
v = np.array([1,2,3])
print v # Prints "[1 2 3]"
print v.T # Prints "[1 2 3]"
import numpy as np
# We will add the vector v to each row of the matrix x,
# storing the result in the matrix y
x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
v = np.array([1, 0, 1])
y = np.empty_like(x) # Create an empty matrix with the same shape as x
# Add the vector v to each row of the matrix x with an explicit loop
for i in range(4):
y[i, :] = x[i, :] + v
# Now y is the following
# [[ 2 2 4]
# [ 5 5 7]
# [ 8 8 10]
# [11 11 13]]
print y
import numpy as np
# We will add the vector v to each row of the matrix x,
# storing the result in the matrix y
x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
v = np.array([1, 0, 1])
print x
print ''
print v
print ''
vv = np.tile(v, (4, 1)) # Stack 4 copies of v on top of each other
print vv # Prints "[[1 0 1]
# [1 0 1]
# [1 0 1]
# [1 0 1]]"
y = x + vv # Add x and vv elementwise
print y # Prints "[[ 2 2 4
# [ 5 5 7]
# [ 8 8 10]
# [11 11 13]]"
import numpy as np
# We will add the vector v to each row of the matrix x,
# storing the result in the matrix y
x = np.array([[0,0,0], [1,2,3], [4,5,6]])
v = np.array([1, 2, 3])
y = x + v # Add v to each row of x using broadcasting
print y # Prints "[[ 2 2 4]
# [ 5 5 7]
# [ 8 8 10]
# [11 11 13]]"
import numpy as np
# Compute outer product of vectors
v = np.array([1,2,3]) # v has shape (3,)
w = np.array([4,5]) # w has shape (2,)
# To compute an outer product, we first reshape v to be a column
# vector of shape (3, 1); we can then broadcast it against w to yield
# an output of shape (3, 2), which is the outer product of v and w:
# [[ 4 5]
# [ 8 10]
# [12 15]]
print 'np.reshape(v, (3, 1))'
print np.reshape(v, (3, 1))
print 'w'
print w
print '(vT . w)'
print np.reshape(v, (3, 1)) * w
# Add a vector to each row of a matrix
x = np.array([[1,2,3], [4,5,6]])
# x has shape (2, 3) and v has shape (3,) so they broadcast to (2, 3),
# giving the following matrix:
# [[2 4 6]
# [5 7 9]]
print 'x'
print x
print 'v'
print v
print 'x + v'
print x + v
# Add a vector to each column of a matrix
# x has shape (2, 3) and w has shape (2,).
# If we transpose x then it has shape (3, 2) and can be broadcast
# against w to yield a result of shape (3, 2); transposing this result
# yields the final result of shape (2, 3) which is the matrix x with
# the vector w added to each column. Gives the following matrix:
# [[ 5 6 7]
# [ 9 10 11]]
print '(x.T + w).T'
print (x.T + w).T
# Another solution is to reshape w to be a row vector of shape (2, 1);
# we can then broadcast it directly against x to produce the same
# output.
print 'x + np.reshape(w, (2, 1))'
print x + np.reshape(w, (2, 1))
# Multiply a matrix by a constant:
# x has shape (2, 3). Numpy treats scalars as arrays of shape ();
# these can be broadcast together to shape (2, 3), producing the
# following array:
# [[ 2 4 6]
# [ 8 10 12]]
print 'x * 2'
print x * 2
from scipy.misc import imread, imsave, imresize, imshow
# Read an JPEG image into a numpy array
img = imread('assets/cat.jpg')
print img.dtype, img.shape # Prints "uint8 (400, 248, 3)"
# We can tint the image by scaling each of the color channels
# by a different scalar constant. The image has shape (400, 248, 3);
# we multiply it by the array [1, 0.95, 0.9] of shape (3,);
# numpy broadcasting means that this leaves the red channel unchanged,
# and multiplies the green and blue channels by 0.95 and 0.9
# respectively.
img_tinted = img * [1, 0.95, 0.9]
# Resize the tinted image to be 300 by 300 pixels.
img_tinted = imresize(img_tinted, (300, 300))
# Write the tinted image back to disk
imsave('assets/cat_tinted.jpg', img_tinted)
%matplotlib inline
import matplotlib.pyplot as plt
# Show the original image
plt.subplot(1, 2, 1)
plt.imshow(img)
# Show the tinted image
plt.subplot(1, 2, 2)
# A slight gotcha with imshow is that it might give strange results
# if presented with data that is not uint8. To work around this, we
# explicitly cast the image to uint8 before displaying it.
plt.imshow(np.uint8(img_tinted))
plt.show()
import numpy as np
from scipy.spatial.distance import pdist, squareform
# Create the following array where each row is a point in 2D space:
# [[0 1]
# [1 0]
# [2 0]]
x = np.array([[0, 1], [1, 0], [2, 0]])
print x
# Compute the Euclidean distance between all rows of x.
# d[i, j] is the Euclidean distance between x[i, :] and x[j, :],
# and d is the following array:
# [[ 0. 1.41421356 2.23606798]
# [ 1.41421356 0. 1. ]
# [ 2.23606798 1. 0. ]]
dist = pdist(x, 'euclidean')
print dist
d = squareform(pdist(x, 'euclidean'))
print d
import numpy as np
import matplotlib.pyplot as plt
# Compute the x and y coordinates for points on a sine curve
x = np.arange(0, 3 * np.pi, 0.1)
y = np.sin(x)
# Plot the points using matplotlib
plt.plot(x, y)
plt.show() # You must call plt.show() to make graphics appear.
import numpy as np
import matplotlib.pyplot as plt
# Compute the x and y coordinates for points on sine and cosine curves
x = np.arange(0, 3 * np.pi, 0.1)
y_sin = np.sin(x)
y_cos = np.cos(x)
# Plot the points using matplotlib
plt.plot(x, y_sin)
plt.plot(x, y_cos)
plt.xlabel('x axis label')
plt.ylabel('y axis label')
plt.title('Sine and Cosine')
plt.legend(['Sine and Cosine'])
plt.show()
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 3 * np.pi, 0.1)
y_sin = np.sin(x)
y_cos = np.cos(x)
plt.subplot(2,1,1)
plt.plot(x,y_sin)
plt.title('Sine')
plt.subplot(2,1,2)
plt.plot(x,y_cos)
plt.title('Cosine')
plt.show()
import numpy as np
from scipy.misc import imread, imresize
import matplotlib.pyplot as plt
img = imread('assets/cat.jpg')
img_tinted = img * [1, 0.95, 0.9]
# Show the original image
plt.subplot(1, 2, 1)
plt.imshow(img)
# Show the tinted image
plt.subplot(1, 2, 2)
# A slight gotcha with imshow is that it might give strange results
# if presented with data that is not uint8. To work around this, we
# explicitly cast the image to uint8 before displaying it.
plt.imshow(np.uint8(img_tinted))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Request 2
Step2: Request 3
Step3: Request 4
Step4: On a side note
|
<ASSISTANT_TASK:>
Python Code:
fullbase = requests.compat.urljoin(baseurl, endpoint_datatypes)
r = requests.get(
fullbase,
headers=custom_headers,
# params={'limit':1000},
params={'limit':1000, 'datasetid':"NORMAL_DLY"},
)
r.headers
r.text
json.loads(r.text)
fullbase = requests.compat.urljoin(baseurl, endpoint_data)
r = requests.get(
fullbase,
headers=custom_headers,
params=params,
)
json.loads(r.text)
r.headers
fullbase = requests.compat.urljoin(baseurl, endpoint_datasets)
r = requests.get(
fullbase,
headers=custom_headers,
)
json.loads(r.text)
for station in all_stations:
path = os.path.join(endpoint_stations, "GHCND:{}".format(station))
fullbase = requests.compat.urljoin(baseurl, path)
r = requests.get(
fullbase,
headers=custom_headers,
)
print(json.dumps(json.loads(r.text), indent=2))
fullbase = requests.compat.urljoin(baseurl, endpoint_stations, "GHCND:{}".format(station))
fullbase
0o77
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step4: 1.2 Unpacking Elements from Iterables of Arbitrary Length
Step6: Discussion
Step8: 1.3 Keeping the Last N Items (in list queue with deque)
Step9: Generator functions (with yield) are common when searching for items. This decouples the process of searching from the code that uses results
Step10: 1.4 Finding the Largest or Smallest N Items
Step11: Discussion
Step12: Discussion
Step13: 1.7 Keeping Dictionaries in Order
Step14: Discussion
Step15: Discussion
Step16: 1.9 Finding Commonalities in Two Dictionaries
Step17: Discussion
Step18: Discussion
|
<ASSISTANT_TASK:>
Python Code:
p = (4, 5, 6, 7)
x, y, z, w = p # x -> 4
data = ['ACME', 50, 91.1, (2012, 12, 21)]
name, _, price, date = data # name -> 'ACME', data -> (2012, 12, 21)
s = 'Hello'
a, b, c, d, e = s # a -> H
p = (4, 5)
x, y, z = p # "ValueError"
def drop_first_last(grades):
    """Drop the first and last exams, then average the rest.

    Requires len(grades) >= 3 so at least one middle grade remains.
    """
    # fixed: the docstring had lost its quotes (SyntaxError) and the
    # original called an undefined avg(); compute the mean inline.
    first, *middle, last = grades
    return sum(middle) / len(middle)
def arbitrary_numbers():
    """Unpack name and email followed by an arbitrary number of phones.

    Star-unpacking guarantees phone_numbers is always a list, even for
    zero or one trailing numbers.
    """
    # fixed: the docstring had lost its triple quotes (SyntaxError)
    record = ('Dave', 'dave@example.com', '555-555-5555', '555-555-5544')
    name, email, *phone_numbers = record  # phone_numbers always a list
    return phone_numbers
def recent_to_first_n():
    """Compare the most recent quarter to the average of the earlier ones.

    Delegates the final comparison to avg_comparison() (defined in the
    recipe's surrounding context).
    """
    # fixed: unquoted docstring (SyntaxError), the sales_record/
    # sales_records name mismatch (NameError), and numeric strings in
    # the tuple that would make sum() raise TypeError.
    sales_records = (23.444, 234.23, 0, 23.12, 15.56)
    *trailing_qtrs, current_qtr = sales_records
    trailing_avg = sum(trailing_qtrs) / len(trailing_qtrs)
    return avg_comparison(trailing_avg, current_qtr)
####### 1 ##############
# Tagged-tuple dispatch: the first element selects the handler and the
# star-unpacked remainder becomes that handler's arguments.
records = [ ('foo', 1, 2), ('bar', 'hello'), ('foo', 3, 4) ]

def do_foo(x, y):
    """Handle a 'foo' record carrying two values."""
    print('foo', x, y)  # fixed: Python 2 print statement -> print()

def do_bar(s):
    """Handle a 'bar' record carrying one string."""
    print('bar', s)

for tag, *args in records:
    if tag == 'foo':
        do_foo(*args)
    elif tag == 'bar':
        do_bar(*args)
#########################
######## 2 ##############
line = 'nobody:*:-2:-2:Unprivileged User:/var/empty:/usr/bin/false'
uname, *fields, homedir, sh = line.split(':') # uname -> nobody
#########################
######### 3 #############
record = ('ACME', 50, 123, 45, (12, 18, 2012))
name, *_, (*_, year) = record # name and year
#########################
######### 4 #############
def sum(items):
    """Sum a non-empty sequence recursively via star-unpacking.

    NOTE: shadows the built-in sum(); kept only because the recipe
    demonstrates it. Recursions are not recommended w/ Python (no
    tail-call optimization, so long inputs hit the recursion limit).
    """
    # fixed: docstring had lost its quotes, and the recursion used
    # sum(*tail), which unpacks the tail into separate positional
    # arguments and raises TypeError whenever len(tail) > 1.
    head, *tail = items
    return head + sum(tail) if tail else head
#########################
from collections import deque
def search(lines, pattern, history=5):
    """Yield (line, previous_lines) for each line containing *pattern*.

    previous_lines is a deque holding up to *history* lines seen before
    the match. The same deque object is yielded every time and keeps
    mutating, so consume it (e.g. copy to a list) before advancing the
    generator.
    """
    # fixed: the docstring had lost its triple quotes (SyntaxError)
    previous_lines = deque(maxlen=history)  # bounded history buffer
    for line in lines:
        if pattern in line:
            yield line, previous_lines
        previous_lines.append(line)
# Example use on a file
if __name__ == '__main__':
with open('somefile.txt') as f:
for line, prevlines in search(f, 'python', 5):
for pline in prevlines:
print(pline, end='')
print(line, end='')
print('-' * 20)
######## 1, 2, 3 ########
q = deque(maxlen=3)
q.append(1)
q.appendleft(4)
q.pop() # 1
q.popleft() # 4
#########################
import heapq
nums = [1, 8, 2, 23, 7, -4, 18, 23, 42, 37, 2]
print(heapq.nlargest(3, nums)) # [42, 37 ,23]
print(heapq.nsmallest(3, nums)) # [-4, 1, 2]
heap.heappop(nums) # -4
# use key parameter to use with complicated data structures
portfolio = [
{'name': 'IBM', 'shares': 100, 'price': 91.1},
{'name': 'AAPL', 'shares': 50, 'price': 543.22},
{'name': 'FB', 'shares': 200, 'price': 21.09},
{'name': 'HPQ', 'shares': 35, 'price': 31.75},
{'name': 'YHOO', 'shares': 45, 'price': 16.35},
{'name': 'ACME', 'shares': 75, 'price': 115.65}
]
cheap = heapq.nsmallest(3, portfolio, key=lambda s: s['price'])
expensive = heapq.nlargest(3, portfolio, key=lambda s: s['price'])
# if N is close to the size of the items:
sorted(nums)[:N] # a better approach
import heapq
class PriorityQueue:
    """Max-priority queue; equal priorities pop in insertion (FIFO) order."""

    def __init__(self):
        self._queue = []   # heap of (-priority, index, item) triples
        self._index = 0    # monotone counter: FIFO tie-breaker, also keeps
                           # heapq from ever comparing the items themselves

    def __repr__(self):
        return 'PriorityQueue({}) with index({})'.format(self._queue, self._index)

    def push(self, item, priority):
        """Insert *item*; higher *priority* values pop first."""
        entry = (-priority, self._index, item)  # negate: heapq is a min-heap
        heapq.heappush(self._queue, entry)
        self._index += 1

    def pop(self):
        """Remove and return the highest-priority (oldest on ties) item."""
        return heapq.heappop(self._queue)[-1]
class Item:
    """Simple named payload used to demo the priority queue.

    Deliberately defines no ordering; ties in the heap are broken by the
    queue's insertion counter rather than by comparing Item objects.
    """
    def __init__(self, name):
        # name: arbitrary identifier, echoed in the repr
        self.name = name
    def __repr__(self):
        """Unambiguous repr, e.g. Item('foo')."""
        return 'Item({!r})'.format(self.name)
q = PriorityQueue()
print(q)
q.push(Item('foo'), 1)
print(q)
q.push(Item('bar'), 5)
print(q)
q.push(Item('spam'), 4)
print(q)
q.push(Item('grok'), 1)
print(q)
q.pop() # -> Item('bar')
print(q)
q.pop() # -> Item('spam')
print(q)
q.pop() # -> Item('foo')
print(q)
q.pop() # -> Item('grok')
print(q)
# foo and grok were popped in the same order in which they were inserted
from collections import defaultdict
d = defaultdict(list) # multiple values will be added to a list
d['a'].append(1)
d['a'].append(2)
d['b'].append(4)
d = defaultdict(set) # multiple values will be added to a set
d['a'].add(1)
d['b'].add(2)
d['a'].add(5)
# Messier setdefault
d = {}
d.setdefault('a', []).append(1)
d.setdefault('a', []).append(2) # will add to the existing list
# Even messier
d = {}
for key, value in paiers:
if key not in d:
d[key] = []
d[key].append(value)
# Best!
d = defaultdict(list)
for key, value in pairs:
d[key].append(value)
from collections import OrderedDict
d = OrderedDict()
d['foo'] = 1
d['bar'] = 2
d['spam'] = 3
d['grok'] = 4
for key in d:
print(key, d[key]) # -> 'foo 1', 'bar 2', 'spam 3', 'grok 4'
# Use when serializing JSON
import json
json.dumps(d) # -> '{"foo": 1, "bar": 2, "spam": 3, "grok": 4}'
prices = {
'ACME': 45.23,
'AAPL': 612.78,
'IBM': 205.55,
'HPQ': 37.20,
'FB': 10.75
}
# to get calculated values first reverse and zip
min_price = min(zip(prices.values(), prices.keys())) # (10.75, 'FB')
max_price = max(zip(prices.values(), prices.keys())) # (612.78, 'AAPL')
# to rank the data use zip with sorted
prices_sorted = sorted(zip(prices.values(), prices.keys())) # [(10.75, 'FB'), (37.2, 'HPQ')...]
# the iterator can be consumed only once
prices_and_names = zip(prices.values(), prices.keys())
print(min(prices_and_names)) # result OK
print(max(prices_and_names)) # ValueError: max() arg is an empty sequence
#### 1 #############
min(prices) # 'AAPL'
max(prices) # 'IBM'
#### 2 ############
min(prices.values()) # 10.75
max(prices.values()) # 612.78
#### 3 ############
min(prices, key=lambda k: prices[k]) # 'FB'
max(prices, key=lambda k: prices[k]) # 'AAPL' -> perfrom calculation on values and return key
# to get the value as well as the key, additionally:
min_key = min(prices, key=lambda k: prices[k])
min_value = prices[min(prices, key=lambda k: prices[k])]
#### 4, 5 #########
prices = { 'AAA' : 45.23, 'ZZZ': 45.23 }
min(zip(prices.values(), prices.keys())) # (45.23, 'AAA')
max(zip(prices.values(), prices.keys())) # (45.23, 'ZZZ')
a={
'x' : 1,
'y' : 2,
'z' : 3
}
b={
'w' : 10,
'x' : 11,
'y' : 2
}
# find keys in common
a.keys() & b.keys() # {'x', 'y'}
# find keys in a that are not in b
a.keys() - b.keys() # {'z'}
# find (key, value) pairs in common
a.items() & b.items() # {('y', 2)}
# alter/filter dictionary contents - make a new dict with selected keys removed
c = { key: a[key] for key in a.keys() - {'z', 'w'}} # {'x': 1, 'y': 2}
###### 1 #########
def dedupe(items):
    """Yield the elements of *items* in order, skipping repeats.

    Elements must be hashable; the first occurrence of each wins.
    """
    seen = set()
    for element in items:
        if element in seen:
            continue
        seen.add(element)
        yield element
a = [1, 5, 2, 1, 9, 1, 5, 10]
list(dedupe(a)) # [1, 5, 2, 9, 10]
##### 2 ##########
def dedupe(items, key=None):
    """Yield items in order, dropping duplicates.

    *key* (as in min/max/sorted) maps each item to a hashable value used
    for duplicate detection, which lets unhashable items (e.g. dicts) be
    deduplicated by selected fields. First occurrence wins.
    """
    seen = set()
    for element in items:
        marker = element if key is None else key(element)
        if marker not in seen:
            seen.add(marker)
            yield element
a = [ {'x':1, 'y':2}, {'x':1, 'y':3}, {'x':1, 'y':2}, {'x':2, 'y':4}]
# remove duplicates based on x/y values
list(dedupe(a, key=lambda d: (d['x'], d['y']))) # [{'x': 1, 'y': 2}, {'x': 1, 'y': 3}, {'x': 2, 'y': 4}]
##### 3 #########
# remove duplicates based on x values - for each item in "a" sequence execute the lambda function
list(dedupe(a, key=lambda d: d['x'])) # [{'x': 1, 'y': 2}, {'x': 2, 'y': 4}]
# let's eliminate duplicate lines from a file using the dedupe(items, key=None) generator
with open('somefile.txt', 'r') as f:
# the generator will spit out a single value (line) at a time,
# while keeping track (a pointer) to where it is located during each yield
for line in dedupe(f):
# process unique lines
pass
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: With the <tt>git_bin</tt>, we can execute almost any Git command we like directly. In our hypothetical use case, we want to retrieve some information about the change frequency of files. For this, we need the complete history of the Git repo including statistics for the changed files (via <tt>--numstat</tt>).
Step2: We now read in the complete files' history in the <tt>git_log</tt> variable. Don't let confuse you by all the <tt>\t</tt> characters.
Step3: The last steps are easy. We fill all the empty file statistics rows with the commit's metadata.
Step4: And drop all the commit metadata rows that don't contain file statistics.
Step5: We are finished! This is it.
Step6: Bonus section
Step7: After this, we have to tell Git which information we want. We can do this via the <tt>pretty-format</tt> option.
Step8: OK, this part is ready, let's have a look at the file statistics!
Step9: Discussion
|
<ASSISTANT_TASK:>
Python Code:
import git
GIT_LOG_FILE = r'${REPO}/spring-petclinic'
repo = git.Repo(GIT_LOG_FILE)
git_bin = repo.git
git_bin
git_log = git_bin.execute('git log --numstat --pretty=format:"\t\t\t%h\t%at\t%aN"')
git_log[:100]
import pandas as pd
from io import StringIO
commits_raw = pd.read_csv(StringIO(git_log),
sep="\t",
header=None,
names=['additions', 'deletions', 'filename', 'sha', 'timestamp', 'author']
)
commits_raw.head()
commits = commits_raw.fillna(method='ffill')
commits.head()
commits = commits.dropna()
commits.head()
pd.read_csv("../../spring-petclinic/git.log",
sep="\t",
header=None,
names=[
'additions',
'deletions',
'filename',
'sha',
'timestamp',
'author']).fillna(method='ffill').dropna().head()
commits['additions'] = pd.to_numeric(commits['additions'], errors='coerce')
commits['deletions'] = pd.to_numeric(commits['deletions'], errors='coerce')
commits = commits.dropna()
commits['timestamp'] = pd.to_datetime(commits['timestamp'], unit="s")
commits.head()
commits.groupby('filename')[['timestamp']].count().sort_values(by='timestamp', ascending=False).head(10)
java_commits = commits[commits['filename'].str.endswith(".c")]
java_commits.head()
java_commits.groupby('author').sum()[['additions']].sort_values(by='additions', ascending=False).head()
commits[commits['timestamp'].max() == commits['timestamp']]
java_commits[java_commits['timestamp'].min() == java_commits['timestamp']]
commits = commits[commits['timestamp'] <= 'today']
latest = commits.sort_values(by='timestamp', ascending=False)
latest.head()
commits['today'] = pd.Timestamp('today')
commits.head()
initial_commit_date = commits[-1:]['timestamp'].values[0]
initial_commit_date
commits = commits[commits['timestamp'] >= initial_commit_date]
commits.head()
commits['age'] = commits['timestamp'] - commits['today']
commits.head()
commits.groupby('filename')[['age']].min().sort_values(by='age').head(10)
java_commits.groupby('filename')\
.count()[['additions']]\
.sort_values(by='additions', ascending=False).head()
ages = commits.sort_values(by='age', ascending=False).drop_duplicates(subset=['filename'])['age'] * -1
ages.head()
ages.dt.days.hist()
commits.groupby('filename')
import glob
file_list = [
os.path.abspath(path).replace(os.sep, "/") for path in glob.iglob("../../linux/**/*.*")]
file_list[:5]
[os.path.normpath
%matplotlib inline
commits.groupby('filename')\
.count()[['additions']]\
.sort_values(by='additions', ascending=False)\
.plot(kind='bar')
commits.sort_values(by='age', ascending=False).groupby('filename').first().sort_values(by='age', ascending=False)
%matplotlib inline
commits.groupby('filename')\
.count()['additions']\
.hist(bins=20)
commits.groupby('filename').count().sort_values(by='additions', ascending=False)
commits.groupby('author').sum()[['additions']].sort_values(by='additions', ascending=False)
%matplotlib inline
timed_commits = java_commits.set_index(pd.DatetimeIndex(java_commits['timestamp']))[['additions', 'deletions']].resample('1D').sum()
timed_commits
(timed_commits['additions'] - timed_commits['deletions']).cumsum().fillna(method='ffill').plot()
c = commits[commits['timestamp'] <= 'today']
c.sort_values(by='timestamp', ascending=False).head()
c = c\
.groupby('sha')\
.first()\
.reset_index()
c.head()
%matplotlib inline
c.set_index(
pd.DatetimeIndex(c['timestamp'])
)['additions']\
.resample('W-SUN', convention='start')\
.count()\
.tail(500)\
.plot(kind='area', figsize=(100,7))
c.set_index(
pd.DatetimeIndex(c['timestamp'])
)['additions']\
.resample('W-SUN', convention='start')\
.count()\
.tail(500)\
df = c.set_index(
pd.DatetimeIndex(c['timestamp']))
df2 = df.resample('W').count().dropna()
df2.tail()
df2['month'] = df2.index.month
df2.head()
df3 = df2.groupby([df2.index.year, df2.index.month]).aggregate({'month': 'first', 'sha' : 'min'})
df3.head()
df3.groupby(df3.index).count()
%matplotlib inline
commits['author'].value_counts().plot(kind='pie', figsize=(10,10))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we set some parameters.
Step2: Load the MEG data
Step3: Estimate the background noise covariance from the baseline period
Step4: Generate sinusoids in two spatially distant labels
Step5: Find the center vertices in source space of each label
Step6: Create source-space data with known signals
Step7: Plot original signals
Step8: Simulate sensor-space signals
Step9: Plot the point-spread of corrupted signal
|
<ASSISTANT_TASK:>
Python Code:
import os.path as op
import numpy as np
from mayavi import mlab
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.simulation import simulate_stc, simulate_evoked
seed = 42
# parameters for inverse method
method = 'sLORETA'
snr = 3.
lambda2 = 1.0 / snr ** 2
# signal simulation parameters
# do not add extra noise to the known signals
nave = np.inf
T = 100
times = np.linspace(0, 1, T)
dt = times[1] - times[0]
# Paths to MEG data
data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-oct-6-fwd.fif')
fname_inv = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-oct-6-meg-fixed-inv.fif')
fname_evoked = op.join(data_path, 'MEG', 'sample',
'sample_audvis-ave.fif')
fwd = mne.read_forward_solution(fname_fwd)
fwd = mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True,
use_cps=False)
fwd['info']['bads'] = []
inv_op = read_inverse_operator(fname_inv)
raw = mne.io.read_raw_fif(op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw.fif'))
raw.set_eeg_reference(projection=True)
events = mne.find_events(raw)
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2}
epochs = mne.Epochs(raw, events, event_id, baseline=(None, 0), preload=True)
epochs.info['bads'] = []
evoked = epochs.average()
labels = mne.read_labels_from_annot('sample', subjects_dir=subjects_dir)
label_names = [l.name for l in labels]
n_labels = len(labels)
cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)
# The known signal is all zero-s off of the two labels of interest
signal = np.zeros((n_labels, T))
idx = label_names.index('inferiorparietal-lh')
signal[idx, :] = 1e-7 * np.sin(5 * 2 * np.pi * times)
idx = label_names.index('rostralmiddlefrontal-rh')
signal[idx, :] = 1e-7 * np.sin(7 * 2 * np.pi * times)
hemi_to_ind = {'lh': 0, 'rh': 1}
for i, label in enumerate(labels):
# The `center_of_mass` function needs labels to have values.
labels[i].values.fill(1.)
# Restrict the eligible vertices to be those on the surface under
# consideration and within the label.
surf_vertices = fwd['src'][hemi_to_ind[label.hemi]]['vertno']
restrict_verts = np.intersect1d(surf_vertices, label.vertices)
com = labels[i].center_of_mass(subject='sample',
subjects_dir=subjects_dir,
restrict_vertices=restrict_verts,
surf='white')
# Convert the center of vertex index from surface vertex list to Label's
# vertex list.
cent_idx = np.where(label.vertices == com)[0][0]
# Create a mask with 1 at center vertex and zeros elsewhere.
labels[i].values.fill(0.)
labels[i].values[cent_idx] = 1.
stc_gen = simulate_stc(fwd['src'], labels, signal, times[0], dt,
value_fun=lambda x: x)
kwargs = dict(subjects_dir=subjects_dir, hemi='split', smoothing_steps=4,
time_unit='s', initial_time=0.05, size=1200,
views=['lat', 'med'])
clim = dict(kind='value', pos_lims=[1e-9, 1e-8, 1e-7])
figs = [mlab.figure(1), mlab.figure(2), mlab.figure(3), mlab.figure(4)]
brain_gen = stc_gen.plot(clim=clim, figure=figs, **kwargs)
evoked_gen = simulate_evoked(fwd, stc_gen, evoked.info, cov, nave,
random_state=seed)
# Map the simulated sensor-space data to source-space using the inverse
# operator.
stc_inv = apply_inverse(evoked_gen, inv_op, lambda2, method=method)
figs = [mlab.figure(5), mlab.figure(6), mlab.figure(7), mlab.figure(8)]
brain_inv = stc_inv.plot(figure=figs, **kwargs)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If $N$ = num_points, then the error in fitting a line to the points (also defined as Cost, $C$) can be defined as
|
<ASSISTANT_TASK:>
Python Code:
def generate_random_points_along_a_line(slope, intercept, num_points, abs_value, abs_noise):
    """Sample noisy points on the line y = slope*x + intercept.

    x is drawn uniformly from [-abs_value, abs_value); uniform noise
    from [-abs_noise, abs_noise) is added to y. Returns (x, y) arrays.
    """
    x = np.random.uniform(-abs_value, abs_value, num_points)
    noise = np.random.uniform(-abs_noise, abs_noise, num_points)
    y = slope * x + intercept + noise
    return x, y
def plot_points(x,y):
    """Scatter-plot x vs y with axis labels and a title.

    NOTE(review): relies on a global `plt` — assumes matplotlib.pyplot
    is imported as plt in the surrounding notebook; confirm if reused.
    """
    plt.scatter(x, y)
    plt.title('Scatter plot of x and y')
    plt.xlabel('x')
    plt.ylabel('y')
slope = 4
intercept = -3
num_points = 20
abs_value = 4
abs_noise = 2
x, y = generate_random_points_along_a_line (slope, intercept, num_points, abs_value, abs_noise)
plot_points(x, y)
# this function computes gradient with respect to slope m
def grad_m(x, y, m, b):
    """Gradient of the squared-error cost with respect to the slope m."""
    residual = y - (m*x + b)
    return np.sum(np.multiply(-2*residual, x))
# this function computes gradient with respect to intercept b
def grad_b(x, y, m, b):
    """Gradient of the squared-error cost with respect to the intercept b."""
    residual = y - (m*x + b)
    return np.sum(-2*residual)
# Performs gradient descent
def gradient_descent(x, y, num_iterations, learning_rate):
    """Fit y ~ m*x + b by batch gradient descent; returns (m, b).

    m and b start as length-1 arrays drawn uniformly from [-1, 1) and
    are moved opposite the cost gradient for num_iterations steps.
    """
    m = np.random.uniform(-1, 1, 1)
    b = np.random.uniform(-1, 1, 1)
    for _ in range(num_iterations):
        m = m - learning_rate * grad_m(x, y, m, b)
        b = b - learning_rate * grad_b(x, y, m, b)
    return m, b
# Plot point along with the best fit line
def plot_line (m, b, x, y):
    """Overlay the fitted line y = m*x + b (red) on the scatter of the points."""
    plot_points(x,y)
    plt.plot(x, x*m + b, 'r')
    plt.show()
# In general, keep num_iterations high and learning_rate low.
num_iterations = 1000
learning_rate = 0.0001
m, b = gradient_descent (x, y, num_iterations, learning_rate)
plot_line (m, b, x, y)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Polynomial regression can be done with the functions polyfit
Step2: Using a 1st-degree polynomial fit (that is, fitting a straight line to x and y),
Step3: When the error of predicted results is larger than desired,
Step4: Run the following code to produce an example plot
Step5: In the above figure, we see fits for three different values of $d$.
Step6: In order to quantify the effects of bias and variance and construct
Step7: This figure compactly shows the reason that cross-validation is
Step8: Here we show the learning curve for $d = 1$. From the above
Step9: Here we show the learning curve for $d = 20$. From the above
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import numpy as np
np.random.seed(42)
x = np.random.random(20)
y = np.sin(2 * x)
p = np.polyfit(x, y, 1) # fit a 1st-degree polynomial (i.e. a line) to the data
print p # slope and intercept
x_new = np.random.random(3)
y_new = np.polyval(p, x_new) # evaluate the polynomial at x_new
print abs(np.sin(x_new) - y_new)
import pylab as pl
def plot_fit(x, y, p):
    """Scatter the data points and overlay the polynomial p (np.polyfit
    coefficients, highest degree first) evaluated on a dense grid over [0, 1]."""
    xfit = np.linspace(0, 1, 1000)
    yfit = np.polyval(p, xfit)
    pl.scatter(x, y, c='k')
    pl.plot(xfit, yfit)
    pl.xlabel('x')
    pl.ylabel('y')
def test_func(x, err=0.5):
return np.random.normal(10 - 1. / (x + 0.1), err)
def compute_error(x, y, p):
    """Root-mean-square error of the polynomial p (np.polyfit coefficients)
    evaluated at x against the observed values y."""
    predicted = np.polyval(p, x)
    residuals = y - predicted
    return np.sqrt(np.mean(residuals ** 2))
N = 8
np.random.seed(42)
x = 10 ** np.linspace(-2, 0, N)
y = test_func(x)
xfit = np.linspace(-0.2, 1.2, 1000)
titles = ['d = 1 (under-fit)', 'd = 2', 'd = 6 (over-fit)']
degrees = [1, 2, 6]
pl.figure(figsize = (9, 3.5))
pl.subplots_adjust(left = 0.06, right=0.98,
bottom=0.15, top=0.85,
wspace=0.05)
for i, d in enumerate(degrees):
pl.subplot(131 + i, xticks=[], yticks=[])
pl.scatter(x, y, marker='x', c='k', s=50)
p = np.polyfit(x, y, d)
yfit = np.polyval(p, xfit)
pl.plot(xfit, yfit, '-b')
pl.xlim(-0.2, 1.2)
pl.ylim(0, 12)
pl.xlabel('house size')
if i == 0:
pl.ylabel('price')
pl.title(titles[i])
Ntrain = 100      # number of training samples
Ncrossval = 100   # number of cross-validation samples
Ntest = 50        # number of held-out test samples
error = 1.0       # intrinsic noise level passed to test_func
# randomly sample the data
np.random.seed(0)
x = np.random.random(Ntrain + Ncrossval + Ntest)
y = test_func(x, error)
# select training set
# data is already random, so we can just choose a slice.
xtrain = x[:Ntrain]
ytrain = y[:Ntrain]
# select cross-validation set
xcrossval = x[Ntrain:Ntrain + Ncrossval]
ycrossval = y[Ntrain:Ntrain + Ncrossval]
# select test set: the final Ntest samples.
# BUG FIX: the original slice x[Ntrain:-Ntest] (= x[100:200]) re-used the
# cross-validation points as the test set instead of the last 50 samples.
xtest = x[Ntrain + Ncrossval:]
ytest = y[Ntrain + Ncrossval:]
pl.scatter(xtrain, ytrain, color='red')
pl.scatter(xcrossval, ycrossval, color='blue')
# Sweep polynomial degree d = 1..20 and record training vs cross-validation
# RMS error at each degree (the classic bias/variance validation curve).
degrees = np.arange(1, 21)
train_err = np.zeros(len(degrees))
crossval_err = np.zeros(len(degrees))
# NOTE(review): test_err is allocated but never filled or plotted below.
test_err = np.zeros(len(degrees))
for i, d in enumerate(degrees):
    # Fit a degree-d polynomial on the training set only.
    p = np.polyfit(xtrain, ytrain, d)
    train_err[i] = compute_error(xtrain, ytrain, p)
    crossval_err[i] = compute_error(xcrossval, ycrossval, p)
pl.figure()
pl.title('Error for 100 Training Points')
pl.plot(degrees, crossval_err, lw=2, label = 'cross-validation error')
pl.plot(degrees, train_err, lw=2, label = 'training error')
# Horizontal reference line: the noise floor no model can beat.
pl.plot([0, 20], [error, error], '--k', label='intrinsic error')
pl.legend()
pl.xlabel('degree of fit')
pl.ylabel('rms error')
# suppress warnings from Polyfit
import warnings
warnings.filterwarnings('ignore', message='Polyfit*')
def plot_learning_curve(d):
    """Plot training and cross-validation RMS error versus training-set
    size for a fixed polynomial degree d.

    Relies on the module-level arrays xtrain/ytrain and xcrossval/ycrossval,
    the size Ntrain and the intrinsic noise level `error`.
    """
    sizes = np.linspace(2, Ntrain, 50).astype(int)
    train_err = np.zeros(sizes.shape)
    crossval_err = np.zeros(sizes.shape)
    for i, size in enumerate(sizes):
        # Fit on only the first `size` training points.
        p = np.polyfit(xtrain[:size], ytrain[:size], d)
        crossval_err[i] = compute_error(xcrossval, ycrossval, p)
        train_err[i] = compute_error(xtrain[:size], ytrain[:size], p)
    pl.figure()
    pl.plot(sizes, crossval_err, lw=2, label='cross-val error')
    pl.plot(sizes, train_err, lw=2, label='training error')
    # Noise floor: no amount of data pushes the error below this line.
    pl.plot([0, Ntrain], [error, error], '--k', label='intrinsic error')
    pl.xlabel('training set size')  # fixed label typo: was 'traning set size'
    pl.ylabel('rms error')
    pl.legend(loc = 0)
    pl.ylim(0, 4)
    pl.xlim(0, 99)
    pl.title('d = %i' % d)
plot_learning_curve(d=1)
plot_learning_curve(d=20)
plot_learning_curve(d=6)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: FeatureCollection
Step2: Image
Step3: Execute
|
<ASSISTANT_TASK:>
Python Code:
import ee
ee.Initialize()
from geetools import batch
p1 = ee.Geometry.Point([-71,-42])
p2 = ee.Geometry.Point([-71,-43])
p3 = ee.Geometry.Point([-71,-44])
feat1 = ee.Feature(p1.buffer(1000), {'site': 1})
feat2 = ee.Feature(p2.buffer(1000), {'site': 2})
feat3 = ee.Feature(p3.buffer(1000), {'site': 3})
fc = ee.FeatureCollection([feat1, feat2, feat3])
collection = ee.ImageCollection('COPERNICUS/S2').filterBounds(fc.geometry())
image = collection.mosaic()
task = batch.Export.image.toDriveByFeature(
image,
collection=fc,
folder='tools_exportbyfeat',
namePattern='test {site}',
scale=10,
dataType='float',
verbose=True
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Training the Base Model
Step2: Save model to disk. Don't finalize the model because we need to train it with new data later!
Step3: Training the Classifier
Step4: Political Ideology Detection
|
<ASSISTANT_TASK:>
Python Code:
# import logging
# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
# logging.root.level = logging.INFO
from os import path
from random import shuffle
from corputil import FileCorpus, ListCorpus
from corputil.utils import load_stopwords
from gensim.models.word2vec import LineSentence, Word2Vec
spd = [
path.join('data', 'Politics', 'SPD.txt'),
path.join('data', 'Politics', 'SPD_EU.txt'),
path.join('data', 'Politics', 'SPD_Fraktion.txt')
]
linke = [
path.join('data', 'Politics', 'Linke.txt'),
path.join('data', 'Politics', 'Linke_EU.txt'),
path.join('data', 'Politics', 'Linke_Fraktion.txt')
]
gruene = [
path.join('data', 'Politics', 'Grüne.txt'),
path.join('data', 'Politics', 'Grüne_EU.txt'),
path.join('data', 'Politics', 'Grüne_Fraktion.txt')
]
fdp = [
path.join('data', 'Politics', 'FDP.txt'),
path.join('data', 'Politics', 'FDP_EU.txt'),
path.join('data', 'Politics', 'FDP_Fraktion.txt')
]
cdu = [
path.join('data', 'Politics', 'CDU.txt'),
path.join('data', 'Politics', 'CDU_EU.txt'),
path.join('data', 'Politics', 'CDU_Fraktion.txt')
]
npd = [
path.join('data', 'Politics', 'NPD_Fraktion_MV.txt'),
path.join('data', 'Politics', 'NPD_Fraktion_Sachsen.txt'),
path.join('data', 'Politics', 'NPD_Jung.txt')
]
corpora = [
FileCorpus(linke),
FileCorpus(spd),
FileCorpus(gruene),
FileCorpus(fdp),
FileCorpus(cdu),
FileCorpus(npd)
]
parties = [
'Linke',
'SPD',
'Gruene',
'FDP',
'CDU',
'NPD'
]
sentences = LineSentence(path.join('data', 'Archive', 'Corpus_Wiki.txt'))
base = Word2Vec(sentences, workers=4, iter=4, size=100, window=2, sg=1)
base.save(path.join('models', 'word2vec', 'Base.w2v'))
base = None
sentences = None
for party, corpus in zip(parties, corpora):
sentences = list(corpus.sentences_token())
shuffle(sentences)
model = Word2Vec.load(path.join('models', 'word2vec', 'Base.w2v'))
model.train(sentences, total_examples=len(sentences))
model.save(path.join('models', 'word2vec', '{}.w2v'.format(party)))
models = [path.join('models', 'word2vec', '{}.w2v'.format(party)) for party in parties]
labels = ['2015-44', '2015-45', '2015-46', '2015-47', '2015-48', '2015-49', '2015-50', '2015-51',
'2015-52', '2015-53', '2016-01', '2016-02', '2016-03', '2016-04', '2016-05', '2016-06']
files = [path.join('data', 'CurrentNews', '{}.csv').format(label) for label in labels]
out = [path.join('data', 'CurrentNews', 's_{}.csv').format(label) for label in labels]
import pandas as pd
import numpy as np
def calc_score(doc, mod):
    """Log-likelihood scores of the sentences in `doc` under the Word2Vec
    model stored at filesystem path `mod` (one score per sentence).

    NOTE(review): the model is re-loaded from disk on every call; callers
    that score many times against the same model pay that cost repeatedly.
    """
    model = Word2Vec.load(mod)
    score = model.score(doc, len(doc))
    return score
# Taken from Matt Taddy: https://github.com/TaddyLab/gensim/blob/deepir/docs/notebooks/deepir.ipynb
def calc_probability(df, mods):
    """Per-document class probabilities for each Word2Vec model path in `mods`.

    Scores every sentence under every model, normalises the per-sentence
    log-likelihoods into probabilities across models (softmax-style), then
    mean-pools the sentence probabilities back up to document level.
    Returns a DataFrame with one row per document and one column per model.
    """
    # Tokenised sentences grouped per document, then flattened to one list.
    docs = list(ListCorpus(list(df.loc[:, 'text'])).doc_sentences_token())
    sentlist = [s for d in docs for s in d]
    # llhd: one row of sentence log-likelihoods per model.
    llhd = np.array( [ calc_score(sentlist, m) for m in mods ] )
    # Subtract the per-sentence max before exponentiating for numerical stability.
    lhd = np.exp(llhd - llhd.max(axis=0))
    prob = pd.DataFrame( (lhd/lhd.sum(axis=0)).transpose() )
    # Tag each sentence row with its document index, then average per document.
    prob["doc"] = [i for i,d in enumerate(docs) for s in d]
    prob = prob.groupby("doc").mean()
    return prob
# raw = pd.concat([pd.read_csv(file, sep='|', encoding='utf-8') for file in files], ignore_index=True)
# prob = calc_probability(raw, models)
# data = pd.concat([raw, prob], axis=1)
# data.groupby('site').mean()
for file, o in zip(files, out):
data = pd.read_csv(file, sep='|', encoding='utf-8')
sentiment = calc_probability(data, models)
csv = pd.concat([data, sentiment], axis=1)
csv.rename(columns={ 0: 'LINKE', 1: 'SPD', 2: 'GRÜNE', 3: 'FDP', 4: 'CDU', 5: 'NPD' }, inplace=True)
csv.to_csv(o, index=False, encoding='utf-8', sep='|')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use of the g.mapset
Step2: print projection info with g.proj
Step3: list vector and raster layers with g.list
Step4: use the getLayerList function to store the g.list output in a python list
Step5: print info for a raster layer with r.info
Step6: use the 'r/v'layerInfo function to store the r.info / v.info output in a python dictionary
Step7: use of the makeImage function to display raster and/or vector maps
Step8: Example on how to repoject raster and vector data between 2 different GRASS LOCATION
|
<ASSISTANT_TASK:>
Python Code:
!g.gisenv
!g.mapset location=nc_basic_spm_grass7 mapset=user1
!g.proj -p
!g.list rast
rasterlist = getLayerList(type='rast')
vectorlist = getLayerList(type='vect')
rasterlist
vectorlist
!r.info elevation@PERMANENT
rasterlayerinfo = rlayerInfo(map='elevation')
vectorlayerinfo = vlayerInfo(map='geology')
rasterlayerinfo.keys()
rlayerInfo('elevation')
!g.mapset location=nc_basic_spm_grass7 mapset=user1
inputlayer={
'raster': ['elevation'],
'vector':['points_of_interest']
}
makeImage(basemap='elevation', inputlayer=inputlayer, maptype='overlay',
vsize=10, maptitle='points_of_interest', gridsize=4000, outputimagename='test.png')
from IPython.core.display import Image
!g.proj -c epsg=4326 location=lonlat
!g.mapset -c location=lonlat mapset=PERMANENT
region = !r.proj input=elevation location=nc_basic_spm_grass7 -g
region
newregion = dict([(i.split('=')[0],i.split('=')[1]) for i in region[-1].split()])
!g.region -p n={newregion['n']} s={newregion['s']} e={newregion['e']} w={newregion['w']} res=0.0001
!r.proj input=elevation location=nc_basic_spm_grass7 output=elevation method=bicubic --o --q
!v.proj input=points_of_interest location=nc_basic_spm_grass7 output=points_of_interest --o --q
#!g.region -p n={newregion['n']} s={newregion['s']} e={newregion['e']} w={newregion['w']} res=0.0001
inputlayer={
'raster': ['elevation'],
'vector':['points_of_interest']
}
makeImage(basemap='elevation', inputlayer=inputlayer, maptype='overlay',
vsize=10, maptitle='points_of_interest', outputimagename='test.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: EECS 445
Step3: Dimensionality Reduction
Step4: Example
Step5: Break time!
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
# plotting
%matplotlib inline
from matplotlib import pyplot as plt;
import matplotlib as mpl;
from mpl_toolkits.mplot3d import Axes3D
# scientific
import numpy as np;
import sklearn as skl;
import sklearn.datasets;
import sklearn.cluster;
import sklearn.mixture;
# ipython
import IPython;
# python
import os;
import random;
#####################################################
# image processing
import PIL;
# trim and scale images
def trim(im, percent=100):
    """Crop the uniform border off a PIL image and rescale the result to
    `percent` percent of the cropped size.

    Returns None implicitly when no content bounding box is found (i.e. the
    image is entirely the background colour) -- NOTE(review): callers should
    be prepared for that case.
    """
    print("trim:", percent);
    # Solid background in the colour of the top-left pixel; the difference
    # against it highlights the non-background content.
    bg = PIL.Image.new(im.mode, im.size, im.getpixel((0,0)))
    # NOTE(review): PIL.ImageChops is referenced but only `import PIL`
    # appears above -- confirm the submodule is importable in this setup.
    diff = PIL.ImageChops.difference(im, bg)
    diff = PIL.ImageChops.add(diff, diff, 2.0, -100)
    bbox = diff.getbbox()
    if bbox:
        x = im.crop(bbox)
        return x.resize(((x.size[0]*percent)//100,
                         (x.size[1]*percent)//100),
                        PIL.Image.ANTIALIAS);
def plot_plane():
    """Scatter 200 random 3-D points whose third coordinate is a linear
    combination of the first two, so all points lie on a plane -- an
    illustration of data with intrinsic dimension 2."""
    # random samples
    n = 200;
    data = np.random.random((3,n));
    # Constrain row 2 to the plane z = 0.6*row0 + 0.4*row1.
    data[2,:] = 0.4 * data[1,:] + 0.6 * data[0,:];
    # plot plane
    fig = plt.figure(figsize=(10,6));
    ax = fig.add_subplot(111, projection="3d");
    ax.scatter(*data);
plot_plane()
## scikit example: Faces recognition example using eigenfaces and SVMs
from __future__ import print_function
from time import time
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the 2 data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
#print("Extracting the top %d eigenfaces from %d faces"
# % (n_components, X_train.shape[0]))
#t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
#print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
#print("Projecting the input data on the eigenfaces orthonormal basis")
#t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
#print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
#print("Fitting the classifier to the training set")
#t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
#print("done in %0.3fs" % (time() - t0))
#print("Best estimator found by grid search:")
#print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
#print("Predicting people's names on the test set")
#t0 = time()
y_pred = clf.predict(X_test_pca)
#print("done in %0.3fs" % (time() - t0))
#print(classification_report(y_test, y_pred, target_names=target_names))
#print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
    """Helper function to plot a gallery of portraits.

    Each flattened image in `images` is reshaped to (h, w) and shown in a
    grey-scale grid of n_row x n_col subplots, titled from `titles`.
    (Restored the docstring quoting: the bare text line left behind by the
    notebook extraction was a syntax error.)
    """
    plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
    plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    for i in range(n_row * n_col):
        plt.subplot(n_row, n_col, i + 1)
        plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
        plt.title(titles[i], size=12)
        plt.xticks(())
        plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
    """Two-line subplot title: predicted vs. true surname for sample i."""
    surname = lambda label: target_names[label].rsplit(' ', 1)[-1]
    return 'predicted: %s\ntrue: %s' % (surname(y_pred[i]), surname(y_test[i]))
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
X, y = skl.datasets.make_blobs(1000, cluster_std=[1.0, 2.5, 0.5], random_state=170)
plt.scatter(X[:,0], X[:,1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fundamental variables
Step2: Derived variables
Step3: The system's vector basis is given by $(\hat{\ell}, \hat{n}, \hat{\lambda})$, and will be computed by the code in terms of the fundamental logarithmic rotors defined above. Here, we give all the substitutions that will be needed in the code.
Step4: Various spin components and combinations
Step5: Other functions of the angular velocity that find frequent use
|
<ASSISTANT_TASK:>
Python Code:
# Make sure division of integers does not round to the nearest integer
from __future__ import division
# Make everything in python's symbolic math package available
from sympy import * # Make sure sympy functions are used in preference to numpy
import sympy # Make sympy. constructions available
from sympy import Rational as frac # Rename for similarity to latex
from sympy import log as ln
# Print symbolic expressions nicely
init_printing()
# We'll use the numpy `array` object for vectors
from numpy import array, cross, dot
# We'll use a custom object to keep track of variables
from Utilities.PNObjects import PNCollection
PNVariables = PNCollection()
# Dimensionful quantities, just in case anybody uses them...
PNVariables.AddBasicConstants('G, c')
# Masses of objects 1 and 2.
PNVariables.AddBasicConstants('M1')
PNVariables.AddBasicConstants('M2')
# Angular speed of separation vector
PNVariables.AddBasicVariables('v', positive=True)
# Tidal deformabilities, in units where the total mass is 1
PNVariables.AddBasicConstants('lambda1, lambda2')
# Spin vectors (assumed to be constant)
PNVariables.AddBasicVariables('chi1_x, chi1_y, chi1_z')
PNVariables.AddBasicVariables('chi2_x, chi2_y, chi2_z')
# Orbital angular-velocity unit vector ("Newtonian" angular momentum direction)
PNVariables.AddBasicVariables('ellHat_x, ellHat_y, ellHat_z')
# Orbital separation unit vector
PNVariables.AddBasicVariables('nHat_x, nHat_y, nHat_z')
PNVariables.AddDerivedConstant('M', M1+M2)
PNVariables.AddDerivedConstant('delta', (M1-M2)/M)
PNVariables.AddDerivedConstant('nu', M1*M2/M**2)
PNVariables.AddDerivedConstant('nu__2', (M1*M2/M**2)**2)
PNVariables.AddDerivedConstant('nu__3', (M1*M2/M**2)**3)
PNVariables.AddDerivedConstant('q', M1/M2)
PNVariables.AddDerivedVariable('ellHat', array([ellHat_x, ellHat_y, ellHat_z]), datatype='std::vector<double>')
PNVariables.AddDerivedVariable('nHat', array([nHat_x, nHat_y, nHat_z]), datatype='std::vector<double>')
PNVariables.AddDerivedVariable('lambdaHat', cross(ellHat.substitution, nHat.substitution), datatype='std::vector<double>')
# Components of lambdaHat are defined in terms of components of ellHat and nHat
for i,d in zip([0,1,2],['x','y','z']):
PNVariables.AddDerivedVariable('lambdaHat_'+d, lambdaHat.substitution[i])
PNVariables.AddDerivedVariable('chiVec1', array([chi1_x, chi1_y, chi1_z]), datatype='std::vector<double>')
PNVariables.AddDerivedVariable('chiVec2', array([chi2_x, chi2_y, chi2_z]), datatype='std::vector<double>')
PNVariables.AddDerivedVariable('chi1Mag', sqrt(chi1_x**2 + chi1_y**2 + chi1_z**2))
PNVariables.AddDerivedVariable('chi2Mag', sqrt(chi2_x**2 + chi2_y**2 + chi2_z**2))
PNVariables.AddDerivedConstant('chi1chi1', dot(chiVec1.substitution, chiVec1.substitution))
PNVariables.AddDerivedVariable('chi1chi2', dot(chiVec1.substitution, chiVec2.substitution))
PNVariables.AddDerivedConstant('chi2chi2', dot(chiVec2.substitution, chiVec2.substitution))
PNVariables.AddDerivedVariable('chi1_ell', dot(chiVec1.substitution, ellHat.substitution))
PNVariables.AddDerivedVariable('chi1_n', dot(chiVec1.substitution, nHat.substitution))
PNVariables.AddDerivedVariable('chi1_lambda', dot(chiVec1.substitution, lambdaHat.substitution))
PNVariables.AddDerivedVariable('chi2_ell', dot(chiVec2.substitution, ellHat.substitution))
PNVariables.AddDerivedVariable('chi2_n', dot(chiVec2.substitution, nHat.substitution))
PNVariables.AddDerivedVariable('chi2_lambda', dot(chiVec2.substitution, lambdaHat.substitution))
PNVariables.AddDerivedConstant('sqrt1Mchi1chi1', sqrt(1-chi1chi1))
PNVariables.AddDerivedConstant('sqrt1Mchi2chi2', sqrt(1-chi2chi2))
PNVariables.AddDerivedVariable('S', chiVec1.substitution*M1**2 + chiVec2.substitution*M2**2, datatype=chiVec1.datatype)
PNVariables.AddDerivedVariable('S_ell', chi1_ell*M1**2 + chi2_ell*M2**2)
PNVariables.AddDerivedVariable('S_n', chi1_n*M1**2 + chi2_n*M2**2)
PNVariables.AddDerivedVariable('S_lambda', chi1_lambda*M1**2 + chi2_lambda*M2**2)
PNVariables.AddDerivedVariable('Sigma', M*(chiVec2.substitution*M2 - chiVec1.substitution*M1), datatype=chiVec1.datatype)
PNVariables.AddDerivedVariable('Sigma_ell', M*(chi2_ell*M2 - chi1_ell*M1))
PNVariables.AddDerivedVariable('Sigma_n', M*(chi2_n*M2 - chi1_n*M1))
PNVariables.AddDerivedVariable('Sigma_lambda', M*(chi2_lambda*M2 - chi1_lambda*M1))
PNVariables.AddDerivedVariable('chi_s', (chiVec1.substitution + chiVec2.substitution)/2, datatype=chiVec1.datatype)
PNVariables.AddDerivedVariable('chi_s_ell', (chi1_ell+chi2_ell)/2)
PNVariables.AddDerivedVariable('chi_s_n', (chi1_n+chi2_n)/2)
PNVariables.AddDerivedVariable('chi_s_lambda', (chi1_lambda+chi2_lambda)/2)
PNVariables.AddDerivedVariable('chi_a', (chiVec1.substitution - chiVec2.substitution)/2, datatype=chiVec1.datatype)
PNVariables.AddDerivedVariable('chi_a_ell', (chi1_ell-chi2_ell)/2)
PNVariables.AddDerivedVariable('chi_a_n', (chi1_n-chi2_n)/2)
PNVariables.AddDerivedVariable('chi_a_lambda', (chi1_lambda-chi2_lambda)/2)
PNVariables.AddDerivedVariable('x', v**2)
PNVariables.AddDerivedVariable('Omega_orb', (v**3)/M)
PNVariables.AddDerivedVariable('logv', log(v))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic Network Statistics
Step2: Exercise
Step3: Exercise
Step4: Since this is a social network of people, there'll be attributes for each individual, such as age, and sex. We can grab that data off from the attributes that are stored with each node.
Step5: Exercise
Step6: Edges can also store attributes in their attribute dictionary.
Step7: In this synthetic social network, I have stored the date as a datetime object. Datetime objects have attributes, namely .year, .month, .day.
Step8: Exercise
Step9: Live Exercise
Step10: Coding Patterns
Step11: If the network is small enough to visualize, and the node labels are small enough to fit in a circle, then you can use the with_labels=True argument.
Step12: However, note that if the number of nodes in the graph gets really large, node-link diagrams can begin to look like massive hairballs. This is undesirable for graph visualization.
Step13: Let's try another visualization, the Circos plot. We can order the nodes in the Circos plot according to the node ID, but any other ordering is possible as well. Edges are drawn between two nodes.
Step14: It's pretty obvious in this visualization that there are nodes, such as node 5 and 18, that are not connected to any other node via an edge. There are other nodes, like node number 19, which is highly connected to other nodes.
|
<ASSISTANT_TASK:>
Python Code:
G = nx.read_gpickle('Synthetic Social Network.pkl') #If you are Python 2.7, read in Synthetic Social Network 27.pkl
nx.draw(G)
# Who are represented in the network?
G.nodes(data=True)
len(G.nodes())
# Who is connected to who in the network?
G.edges()
len(G.edges())
# Let's get a list of nodes with their attributes.
G.nodes(data=True)
# NetworkX will return a list of tuples in the form (node_id, attribute_dictionary)
from collections import Counter
Counter([d['sex'] for n, d in G.nodes(data=True)])
G.edges(data=True)
# Answer
# Earliest and latest connection dates across all edges in the network.
dates = [d['date'] for _, _, d in G.edges(data=True)]
mindate = min(dates)
maxdate = max(dates)
print(mindate, maxdate)
# Answer
G.add_node(31, age=22, sex='Male')
G.add_node(32, age=24, sex='Female')
G.add_edge(31, 32, date=datetime(2010,1,9))
G.add_edge(31, 7, date=datetime(2009,12,11))
G.add_edge(32, 7, date=datetime(2009,12,11))
G.node[31]
ptG = nx.DiGraph() #ptG stands for PyCon Tutorial Graph.
# Add in nodes and edges
ptG.add_node('Eric', nationality='Canada')
ptG.add_node('Paul', nationality='Canada') # (my own TextExpander shortcut is ;addnode)
ptG.add_node('Max', nationality='US')
ptG.add_node('Martin', nationality='Other')
ptG.add_node('Jim', nationality='US')
ptG.add_node('Lucas', nationality='US')
ptG.add_node('Thomas', nationality='US')
ptG.add_node('Brad', nationality='US')
ptG.add_node('Troy', nationality='Canada')
ptG.add_node('Cory', nationality='Canada')
ptG.add_node('Gokhan', nationality='US')
ptG.add_node('Riley', nationality='US')
ptG.add_node('Steve', nationality='US')
ptG.add_node('Ryan', nationality='US')
ptG.add_node('Andrew', nationality='US')
ptG.add_node('Ronan', nationality='Other')
ptG.add_node('Cody', nationality='Canada')
ptG.add_node('Jon', nationality='US')
ptG.add_node('Eric2', nationality='US')
ptG.add_node('William', nationality='US')
ptG.add_node('Tom', nationality='Other')
ptG.add_node('Chris', nationality='US')
ptG.add_node('Stu', nationality='US')
ptG.add_node('Zach', nationality='US')
ptG.add_node('Clint', nationality='Canada')
ptG.add_node('Aaron', nationality='US')
ptG.add_node('Vishal', nationality='US')
ptG.add_node('Federico', nationality='Other')
ptG.add_edge('Vishal', 'Aaron')
ptG.add_edge('Vishal', 'Eric')
ptG.add_edge('Aaron', 'Vishal')
ptG.add_edge('Aaron', 'Eric')
ptG.add_edge('Clint', 'Zach')
ptG.add_edge('Clint', 'Eric')
ptG.add_edge('Zach', 'Clint')
ptG.add_edge('Zach', 'Riley')
ptG.add_edge('Zach', 'Stu')
ptG.add_edge('Stu', 'Zach')
ptG.add_edge('Stu', 'Eric')
ptG.add_edge('Stu', 'Chris')
ptG.add_edge('Chris', 'Stu')
ptG.add_edge('Chris', 'Eric')
ptG.add_edge('Tom', 'Tom')
ptG.add_edge('William', 'Jon')
ptG.add_edge('William', 'Eric2')
ptG.add_edge('William', 'Eric')
ptG.add_edge('Eric2', 'William')
ptG.add_edge('Eric2', 'Jon')
ptG.add_edge('Jon', 'Eric2')
ptG.add_edge('Jon', 'William')
ptG.add_edge('Jon', 'Eric')
ptG.add_edge('Cody', 'Eric')
ptG.add_edge('Cody', 'Ronan')
ptG.add_edge('Ronan', 'Eric')
ptG.add_edge('Ronan', 'Cody')
ptG.add_edge('Andrew', 'Eric')
ptG.add_edge('Andrew', 'Ryan')
ptG.add_edge('Ryan', 'Eric')
ptG.add_edge('Ryan', 'Andrew')
ptG.add_edge('Steve', 'Eric')
ptG.add_edge('Riley', 'Zach')
ptG.add_edge('Paul', 'Paul') # (my own TextExpander shortcut is ;addedge)
ptG.add_edge('Martin', 'Max')
ptG.add_edge('Max', 'Paul')
ptG.add_edge('Martin', 'Eric')
ptG.add_edge('Martin', 'Max')
ptG.add_edge('Jim', 'Federico')
ptG.add_edge('Lucas', 'Thomas')
ptG.add_edge('Brad', 'Eric')
ptG.add_edge('Thomas', 'Lucas')
ptG.add_edge('Troy', 'Cory')
ptG.add_edge('Troy', 'Eric')
ptG.add_edge('Cory', 'Troy')
ptG.add_edge('Gokhan', 'Max')
# We are now going to draw the network using a hive plot, grouping the nodes by the top two nationality groups, and 'others'
# for the third group.
nodes = dict()
nodes['Canada'] = [n for n, d in ptG.nodes(data=True) if d['nationality'] == 'Canada'] #list comprehension here
nodes['US'] = [n for n, d in ptG.nodes(data=True) if d['nationality'] == 'US'] #list comprehension here
nodes['Other'] = [n for n, d in ptG.nodes(data=True) if d['nationality'] == 'Other'] #list comprehension here
edges = dict()
edges['group1'] = [(n1, n2, d) for n1, n2, d in ptG.edges(data=True)] #list comprehension here
nodes_cmap = dict()
nodes_cmap['Canada'] = 'blue'
nodes_cmap['US'] = 'green'
nodes_cmap['Other'] = 'black'
edges_cmap = dict()
edges_cmap['group1'] = 'black'
from hiveplot import HivePlot
h = HivePlot(nodes, edges, nodes_cmap, edges_cmap)
h.set_minor_angle(np.pi / 12) #optional
h.draw()
nx.draw(G)
nx.draw(G, with_labels=True)
matrix = nx.to_numpy_matrix(G)
plt.pcolor(np.array(matrix))
plt.axes().set_aspect('equal') # set aspect ratio equal to get a square visualization
plt.xlim(min(G.nodes()), max(G.nodes())) # set x and y limits to the number of nodes present.
plt.ylim(min(G.nodes()), max(G.nodes()))
plt.title('Adjacency Matrix')
plt.show()
from circos import CircosPlot
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
nodes = sorted(G.nodes())
edges = G.edges()
c = CircosPlot(nodes, edges, radius=10, ax=ax)
c.draw()
nodes = dict()
nodes['male'] = [n for n,d in G.nodes(data=True) if d['sex'] == 'Male']
nodes['female'] = [n for n,d in G.nodes(data=True) if d['sex'] == 'Female']
edges = dict()
edges['group1'] = G.edges(data=True)
nodes_cmap = dict()
nodes_cmap['male'] = 'blue'
nodes_cmap['female'] = 'red'
edges_cmap = dict()
edges_cmap['group1'] = 'black'
h = HivePlot(nodes, edges, nodes_cmap, edges_cmap)
h.draw()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Learning and planning are deeply integrated in the sense that they share almost all the same machinery, differing only in the source of their experience.
Step2: 8.4 Prioritized Sweeping
Step3: 8.5 Expected vs. Sample Updates
|
<ASSISTANT_TASK:>
Python Code:
Image('./res/fig8_1.png')
Image('./res/fig8_2.png')
Image('./res/fig8_5.png')
Image('./res/fig8_6.png')
Image('./res/prioritized_sweeping.png')
Image('./res/fig8_7.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Contenido
Step2: Importante
Step3: 2. Librería Numpy
Step4: 2.1 Array vs Matrix
Step5: Desafío 1
Step6: 2.2 Indexación y Slicing
Step7: Observación
Step8: Desafío 2
Step9: 2. Librería Numpy
Step10: 2. Librería Numpy
Step11: Desafío 3
Step12: Desafío 4
Step13: 2. Librería Numpy
Step14: 2. Librería Numpy
Step15: Revisemos si el archivo quedó bien escrito. Cambiaremos de python a bash para utilizar los comandos del terminal
Step16: Desafío 5
Step17: 2. Librería Numpy
Step18: 2.6 Índices
Step19: Desafío 6
|
<ASSISTANT_TASK:>
Python Code:
#Configuracion para recargar módulos y librerías cada vez
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from mat281_code.lab import *
from IPython.core.display import HTML
from matplotlib import pyplot as plt
HTML(open("style/mat281.css", "r").read())
alumno_1 = (r"Sebastián Flores", "2004001-7") # FIX ME
alumno_2 = (r"María José Vargas", "2004007-8") # FIX ME
HTML(greetings(alumno_1, alumno_2))
import numpy as np
print np.version.version # Si alguna vez tienen problemas, verifiquen su version de numpy
# Presionar tabulacción con el cursor despues de np.arr
np.arr
# Presionar Ctr-Enter para obtener la documentacion de la funcion np.array usando "?"
np.array?
# Presionar Ctr-Enter
%who
x = 10
%who
# Operaciones con np.matrix
A = np.matrix([[1,2],[3,4]])
B = np.matrix([[1, 1],[0,1]], dtype=float)
x = np.matrix([[1],[2]])
print "A =\n", A
print "B =\n", B
print "x =\n", x
print "A+B =\n", A+B
print "A*B =\n", A*B
print "A*x =\n", A*x
print "A*A = A^2 =\n", A**2
print "x.T*A =\n", x.T * A
# Operaciones con np.matrix
A = np.array([[1,2],[3,4]])
B = np.array([[1, 1],[0,1]], dtype=float)
x = np.array([1,2]) # No hay necesidad de definir como fila o columna!
print "A =\n", A
print "B =\n", B
print "x =\n", x
print "A+B =\n", A+B
print "AoB = (multiplicacion elementwise) \n", A*B
print "A*B = (multiplicacion matricial, v1) \n", np.dot(A,B)
print "A*B = (multiplicacion matricial, v2) \n", A.dot(B)
print "A*A = A^2 = (potencia matricial)\n", np.linalg.matrix_power(A,2)
print "AoA = (potencia elementwise)\n", A**2
print "A*x =\n", np.dot(A,x)
print "x.T*A =\n", np.dot(x,A) # No es necesario transponer.
# 1: Utilizando matrix
A = np.matrix([]) # FIX ME
B = np.matrix([]) # FIX ME
print "np.matrix, AxB=\n", #FIX ME
# 2: Utilizando arrays
A = np.array([]) # FIX ME
B = np.array([]) # FIX ME
print "np.matrix, AxB=\n", #FIX ME
x = np.arange(9) # "Vector" con valores del 0 al 8
print "x = ", x
print "x[:] = ", x[:]
print "x[5:] = ", x[5:]
print "x[:8] = ", x[:8]
print "x[:-1] = ", x[:-1]
print "x[1:-1] = ", x[1:-1]
print "x[1:-1:2] = ", x[1:-1:2]
A = x.reshape(3,3) # Arreglo con valores del 0 al 8, en 3 filas y 3 columnas.
print "\n"
print "A = \n", A
print "primera fila de A\n", A[0,:]
print "ultima columna de A\n", A[:,-1]
print "submatriz de A\n", A[:2,:2]
def f(x):
    """Return 1 + x**2; works elementwise on numpy arrays."""
    return x * x + 1
x = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]) # O utilizar np.linspace!
y = f(x) # Tan facil como llamar f sobre x
dydx = ( y[1:] - y[:-1] ) / ( x[1:] - x[:-1] )
x_aux = 0.5*(x[1:] + x[:-1])
# To plot
fig = plt.figure(figsize=(12,8))
plt.plot(x, y, '-s', label="f")
plt.plot(x_aux, dydx, '-s', label="df/dx")
plt.legend(loc="upper left")
plt.show()
def g(x):
    """Return 1 + x**2 + sin(x); works elementwise on numpy arrays."""
    quadratic = x ** 2 + 1
    return quadratic + np.sin(x)
x = np.linspace(0,1,10)
y = g(x)
d2ydx2 = 0 * x # FIX ME
x_aux = 0*d2ydx2 # FIX ME
# To plot
fig = plt.figure(figsize=(12,8))
plt.plot(x, y, label="f")
plt.plot(x_aux, d2ydx2, label="d2f/dx2")
plt.legend(loc="upper left")
plt.show()
# arrays 1d
A = np.ones(3)
print "A = \n", A
print "A.shape =", A.shape
print "len(A) =", len(A)
B = np.zeros(3)
print "B = \n", B
print "B.shape =", B.shape
print "len(B) =", len(B)
C = np.eye(1,3)
print "C = \n", C
print "C.shape =", C.shape
print "len(C) =", len(C)
# Si queremos forzar la misma forma que A y B
C = np.eye(1,3).flatten() # o np.eye(1,3)[0,:]
print "C = \n", C
print "C.shape =", C.shape
print "len(C) =", len(C)
# square arrays
A = np.ones((3,3))
print "A = \n", A
print "A.shape =", A.shape
print "len(A) =", len(A)
B = np.zeros((3,3))
print "B = \n", B
print "B.shape =", B.shape
print "len(B) =", len(B)
C = np.eye(3) # Or np.eye(3,3)
print "C = \n", C
print "C.shape =", C.shape
print "len(C) =", len(C)
# fat 2d array
A = np.ones((2,5))
print "A = \n", A
print "A.shape =", A.shape
print "len(A) =", len(A)
B = np.zeros((2,5))
print "B = \n", B
print "B.shape =", B.shape
print "len(B) =", len(B)
C = np.eye(2,5)
print "C = \n", C
print "C.shape =", C.shape
print "len(C) =", len(C)
x = np.linspace(0., 1., 6)
A = x.reshape(3,2)
print "x = \n", x
print "A = \n", A
print "np.diag(x) = \n", np.diag(x)
print "np.diag(B) = \n", np.diag(A)
print ""
print "A.sum() = ", A.sum()
print "A.sum(axis=0) = ", A.sum(axis=0)
print "A.sum(axis=1) = ", A.sum(axis=1)
print ""
print "A.mean() = ", A.mean()
print "A.mean(axis=0) = ", A.mean(axis=0)
print "A.mean(axis=1) = ", A.mean(axis=1)
print ""
print "A.std() = ", A.std()
print "A.std(axis=0) = ", A.std(axis=0)
print "A.std(axis=1) = ", A.std(axis=1)
A = np.outer(np.arange(3),np.arange(3))
print A
# FIX ME
# FIX ME
# FIX ME
# FIX ME
# FIX ME
def mi_funcion(x):
    """Evaluate f(x) = 1 + x + x**3 + x**5 + sin(x) (elementwise on arrays)."""
    poly = 1 + x + x ** 3 + x ** 5
    return poly + np.sin(x)
N = 5
x = np.linspace(-1,1,N)
y = mi_funcion(x)
# FIX ME
I = 0 # FIX ME
# FIX ME
print "Area bajo la curva: %.3f" %I
# Ilustración gráfica
x_aux = np.linspace(x.min(),x.max(),N**2)
fig = plt.figure(figsize=(12,8))
fig.gca().fill_between(x, 0, y, alpha=0.25)
plt.plot(x_aux, mi_funcion(x_aux), 'k')
plt.plot(x, y, 'r.-')
plt.show()
# Ejemplo de lectura de datos
data = np.loadtxt("data/cherry.txt")
print data.shape
print data
# Ejemplo de lectura de datos, saltandose 11 lineas y truncando a enteros
data_int = np.loadtxt("data/cherry.txt", skiprows=11).astype(int)
print data_int.shape
print data_int
# Guardando el archivo con un header en español
encabezado = "Diametro Altura Volumen (Valores truncados a numeros enteros)"
np.savetxt("data/cherry_int.txt", data_int, fmt="%d", header=encabezado)
%%bash
cat data/cherry_int.txt
# Leer datos
#FIX_ME#
# Convertir a mks
#FIX_ME#
# Guardar en nuevo archivo
#FIX_ME#
x = np.linspace(0,42,10)
print "x = ", x
print "x.shape = ", x.shape
print "\n"
mask_x_1 = x>10
print "mask_x_1 = ", mask_x_1
print "x[mask_x_1] = ", x[mask_x_1]
print "x[mask_x_1].shape = ", x[mask_x_1].shape
print "\n"
mask_x_2 = x > x.mean()
print "mask_x_2 = ", mask_x_2
print "x[mask_x_2] = ", x[mask_x_2]
print "x[mask_x_2].shape = ", x[mask_x_2].shape
A = np.linspace(10,20,12).reshape(3,4)
print "\n"
print "A = ", A
print "A.shape = ", A.shape
print "\n"
mask_A_1 = A>13
print "mask_A_1 = ", mask_A_1
print "A[mask_A_1] = ", A[mask_A_1]
print "A[mask_A_1].shape = ", A[mask_A_1].shape
print "\n"
mask_A_2 = A > 0.5*(A.min()+A.max())
print "mask_A_2 = ", mask_A_2
print "A[mask_A_2] = ", A[mask_A_2]
print "A[mask_A_2].shape = ", A[mask_A_2].shape
T = np.linspace(-100,100,24).reshape(2,3,4)
print "\n"
print "T = ", T
print "T.shape = ", T.shape
print "\n"
mask_T_1 = T>=0
print "mask_T_1 = ", mask_T_1
print "T[mask_T_1] = ", T[mask_T_1]
print "T[mask_T_1].shape = ", T[mask_T_1].shape
print "\n"
mask_T_2 = 1 - T + 2*T**2 < 0.1*T**3
print "mask_T_2 = ", mask_T_2
print "T[mask_T_2] = ", T[mask_T_2]
print "T[mask_T_2].shape = ", T[mask_T_2].shape
x = np.linspace(10,20,11)
print "x = ", x
print "x.shape = ", x.shape
print "\n"
ind_x_1 = np.array([1,2,3,5,7])
print "ind_x_1 = ", ind_x_1
print "x[ind_x_1] = ", x[ind_x_1]
print "x[ind_x_1].shape = ", x[ind_x_1].shape
print "\n"
ind_x_2 = np.array([0,0,1,2,3,4,5,6,7,-3,-2,-1,-1])
print "ind_x_2 = ", ind_x_2
print "x[ind_x_2] = ", x[ind_x_2]
print "x[ind_x_2].shape = ", x[ind_x_2].shape
A = np.linspace(-90,90,10).reshape(2,5)
print "A = ", A
print "A.shape = ", A.shape
print "\n"
ind_row_A_1 = np.array([0,0,0,1,1])
ind_col_A_1 = np.array([0,2,4,1,3])
print "ind_row_A_1 = ", ind_row_A_1
print "ind_col_A_1 = ", ind_col_A_1
print "A[ind_row_A_1,ind_col_A_1] = ", A[ind_row_A_1,ind_col_A_1]
print "A[ind_row_A_1,ind_col_A_1].shape = ", A[ind_row_A_1,ind_col_A_1].shape
print "\n"
ind_row_A_2 = 1
ind_col_A_2 = np.array([0,1,3])
print "ind_row_A_2 = ", ind_row_A_2
print "ind_col_A_2 = ", ind_col_A_2
print "A[ind_row_A_2,ind_col_A_2] = ", A[ind_row_A_2,ind_col_A_2]
print "A[ind_row_A_2,ind_col_A_2].shape = ", A[ind_row_A_2,ind_col_A_2].shape
import numpy as np
k = 0.8
rho = 1.2 #
r_m = np.array([ 25., 25., 25., 25., 25., 25., 20., 20., 20., 20., 20.])
v_kmh = np.array([10.4, 12.6, 9.7, 7.2, 12.3, 10.8, 12.9, 13.0, 8.6, 12.6, 11.2]) # En kilometros por hora
P = 0
n_activos = 0
P_mean = 0.0
P_total = 0.0
print "Existen %d aerogeneradores activos del total de %d" %(n_activos, r.shape[0])
print "La potencia promedio de los aeorgeneradores es {0:.2f} ".format(P_mean)
print "La potencia promedio de los aeorgeneradores es " + str(P_total)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Planet OS API demo for GEFS
Step2: GEFS is a model with lots of output variables, which may also change depending of which particular output file you are checking. Analyse the metadata first, filter for variables we may be interested in and limit the API request.
Step3: Filter by parameter name, in this example we wan't to find pressure at surface.
Step4: API request for precipitation
Step5: API request for surface pressure
Step6: Read data from JSON responce and convert to numpy array for easier plotting
Step7: Precipitation plots
Step8: From the simple distribution it is immediately visible that ensemble members may have very different values at a particular time. Interpretation of this is highly dependent on the physical quantity
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import dateutil.parser
import datetime
from urllib.request import urlopen, Request
import simplejson as json
import pandas as pd
def extract_reference_time(API_data_loc):
    """Return the reference time of the most complete forecast.

    Collects every distinct ``reftime`` found in the API response entries and
    returns the earliest one (an earlier reference time corresponds to the
    longest, most complete forecast horizon).

    Arguments:
    API_data_loc -- parsed JSON response from the Planet OS point API; must
                    contain an 'entries' list whose items carry
                    ['axes']['reftime'] strings.

    Returns:
    The earliest reference-time string present in the response.
    """
    reftimes = {entry['axes']['reftime'] for entry in API_data_loc['entries']}
    reftimes = list(reftimes)
    if len(reftimes) == 1:
        return reftimes[0]
    # Compare as parsed datetimes rather than raw strings, so mixed formats
    # still order correctly.  Using min() also handles more than two distinct
    # reference times, which the original two-way comparison did not.
    return min(reftimes, key=dateutil.parser.parse)
#latitude = 21.205
#longitude = -158.35
latitude = 58
longitude = 26
apikey = open('APIKEY').read().strip()
num_ens = 10
prec_var = "Total_precipitation_surface_6_Hour_Accumulation_ens"
pres_var = "Pressure_surface_ens"
API_meta_url = "http://api.planetos.com/v1/datasets/noaa-ncep_gefs?apikey={}".format(apikey)
request = Request(API_meta_url)
response = urlopen(request)
API_meta = json.loads(response.read())
print(API_meta_url)
[i['name'] for i in API_meta['Variables'] if 'pressure' in i['name'].lower() and 'surface' in i['name'].lower()]
API_url = "http://api.planetos.com/v1/datasets/noaa-ncep_gefs/point?lon={0}&lat={1}&count=2000&verbose=false&apikey={2}&var={3}".format(longitude,latitude,apikey,prec_var)
request = Request(API_url)
response = urlopen(request)
API_data_prec = json.loads(response.read())
print(API_url)
API_url = "http://api.planetos.com/v1/datasets/noaa-ncep_gefs/point?lon={0}&lat={1}&count=2000&verbose=false&apikey={2}&var={3}".format(longitude,latitude,apikey,pres_var)
request = Request(API_url)
response = urlopen(request)
API_data_pres = json.loads(response.read())
print(API_url)
## first collect data to dictionaries, then convert to Pandas DataFrame
pres_data_dict = {}
pres_time_dict = {}
prec_data_dict = {}
prec_time_dict = {}
for i in range(0, num_ens):
pres_data_dict[i] = []
pres_time_dict[i] = []
prec_data_dict[i] = []
prec_time_dict[i] = []
for i in API_data_pres['entries']:
reftime = extract_reference_time(API_data_pres)
if reftime == i['axes']['reftime']:
## print("reftest", int(i['axes']['ens']))
pres_data_dict[int(i['axes']['ens'])].append(i['data'][pres_var])
pres_time_dict[int(i['axes']['ens'])].append(dateutil.parser.parse(i['axes']['time']))
for i in API_data_prec['entries']:
reftime = extract_reference_time(API_data_prec)
if reftime == i['axes']['reftime']:
prec_data_dict[int(i['axes']['ens'])].append(i['data'][prec_var])
prec_time_dict[int(i['axes']['ens'])].append(dateutil.parser.parse(i['axes']['time']))
## check if time scales are equal?!
for i in range(2,num_ens):
##print(i, np.array(pres_time_dict[1]).shape, np.array(pres_time_dict[i]).shape)
if np.amax(np.array(pres_time_dict[1])-np.array(pres_time_dict[i])) != datetime.timedelta(0):
print('timeproblem',np.amax(np.array(pres_time_dict[1])-np.array(pres_time_dict[i])))
pres_pd = pd.DataFrame(pres_data_dict)
prec_pd = pd.DataFrame(prec_data_dict)
prec_pd
fig, (ax0, ax2) = plt.subplots(nrows=2,figsize=(20,12))
ax0.boxplot(prec_pd)
ax0.grid()
ax0.set_title("Simple ensamble distribution")
ax0.set_ylabel('Precipitation mm/6h')
ax2.boxplot(np.cumsum(prec_pd,axis=0))
ax2.grid()
ax2.set_title("Cumulative precipitation distribution")
ax2.set_ylabel('Precipitation mm/6h')
ax2.set_xlabel('Forecast steps (each is 6h)')
fig=plt.figure(figsize=(20,10))
plt.boxplot(pres_pd)
plt.grid()
plt.title('Ensamble distribution')
plt.ylabel('Pressure Pa')
plt.xlabel('Forecast steps (each is 6h)')
fig=plt.figure(figsize=(20,10))
plt.plot(pres_pd)
plt.grid()
plt.ylabel('Pressure Pa')
plt.xlabel('Forecast steps (each is 6h)')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def findstring(s):
    """Repeatedly delete any character that has a '0' on both sides.

    Scans the string left to right; whenever s[i-1] == s[i+1] == '0' the
    character at position i is removed, and the scan steps back so newly
    adjacent pairs are re-examined.  The surviving characters are returned
    joined by single spaces (preserving the original output format).
    """
    chars = list(s)
    i = 1
    while i < len(chars) - 1:
        if chars[i - 1] == '0' and chars[i + 1] == '0':
            chars.pop(i)
            i -= 1
            # Step back once more when the new left neighbour is '0', so the
            # freshly exposed pair is checked on the next pass.
            if i > 0 and chars[i - 1] == '0':
                i -= 1
        i += 1
    return ' '.join(chars)


if __name__ == '__main__':
    # Bugfix: the original guard compared __name__ against ' __main __'
    # (with stray spaces), which can never match, so the demo never ran.
    print(findstring('100100'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Cada celda la puedes usar para escribir el código que tu quieras y si de repente se te olvida alguna función o tienes duda de si el nombre es correcto IPython es muy amable en ese sentido.
Step2: Ejercicio 3
Step4: La gráfica que estás viendo sigue la siguiente ecuación $$y=x^2$$
|
<ASSISTANT_TASK:>
Python Code:
# Lo primero que ejecutarás será 'Hola Jupyter'
print('Hola Jupyter')
variable = 50
saludo = 'Hola'
# Importa matplotlib (paquete para graficar) y numpy (paquete para arreglos).
# Fíjate en el la función mágica para que aparezca nuestra gráfica en la celda.
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# Crea un arreglo de 30 valores para x que va de 0 a 5.
x = np.linspace(0, 5, 30)
y = x**2
# grafica y versus x
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.plot(x, y, color='red')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('A simple graph of $y=x^2$')
# Importa matplotlib y numpy
# con la misma "magia".
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# Importa la función interactiva de IPython usada
# para construir los widgets interactivos
from IPython.html.widgets import interact
def plot_sine(frequency=4.0, grid_points=12, plot_original=True):
    """Plot discrete samples of a sine curve on ``[0, 1]``.

    Illustrates aliasing: the coarse samples (``grid_points`` + 2 points)
    can suggest a very different frequency than the densely sampled curve.

    Arguments:
    frequency -- frequency of the sine wave (cycles over [0, 1])
    grid_points -- number of interior sample points (two endpoints are added)
    plot_original -- if True, also draw the densely sampled signal in red
    """
    # Coarse sampling grid and its sine values.
    x = np.linspace(0, 1, grid_points + 2)
    y = np.sin(2 * frequency * np.pi * x)
    # Fine grid approximating the continuous signal.
    xf = np.linspace(0, 1, 1000)
    yf = np.sin(2 * frequency * np.pi * xf)
    fig, ax = plt.subplots(figsize=(8, 6))
    ax.set_xlabel('x')
    ax.set_ylabel('signal')
    ax.set_title('Aliasing in discretely sampled periodic signal')
    if plot_original:
        ax.plot(xf, yf, color='red', linestyle='solid', linewidth=2)
    ax.plot(x, y, marker='o', linewidth=2)
# la función interactiva construye automáticamente una interfase de usuario para explorar
# la gráfica de la función de seno.
interact(plot_sine, frequency=(1.0, 22.0, 0.5), grid_points=(10, 16, 1), plot_original=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Model A
Step2: Create Model B
Step3: SHAP Values
Step4: Saabas Values
Step5: mean(abs(SHAP Values))
Step6: mean(abs(Saabas Values))
Step7: Split count
Step8: Gain
Step9: Permutation
Step10: Weighted Split Count
Step11: Make plot
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as pl
import numpy as np
import shap
import xgboost as xgb
N = 2000
X = np.zeros((N,2))
X[:1000,0] = 1
X[:500,1] = 1
X[1000:1500,1] = 1
yA = 80 * (X[:,0] * X[:,1]) + 1e-4 * ((X[:,0] == 0) * (X[:,1] == 0)) # last term forces the creation of left split
Xd = xgb.DMatrix(X)
# train a model with single tree
XdA = xgb.DMatrix(X, label=yA)
modelA = xgb.train({
'eta': 1, 'max_depth': 3, 'base_score': 0, "lambda": 0
}, XdA, 1)
print(modelA.get_dump(with_stats=True)[0])
yB = yA + X[:,1] * 10
# train a model with single tree
XdB = xgb.DMatrix(X, label=yB)
modelB = xgb.train({
'eta': 1, 'max_depth': 3, 'base_score': 0, "lambda": 0
}, XdB, 1)
print(modelB.get_dump(with_stats=True)[0])
shap_valuesA = modelA.predict(Xd, pred_contribs=True)
shap_valuesA[0]
shap_valuesB = modelB.predict(Xd, pred_contribs=True)
shap_valuesB[0]
saabas_valuesA = modelA.predict(Xd, pred_contribs=True, approx_contribs=True)
saabas_valuesA[0]
saabas_valuesB = modelB.predict(Xd, pred_contribs=True, approx_contribs=True)
saabas_valuesB[0]
np.abs(shap_valuesA).mean(0)
np.abs(shap_valuesB).mean(0)
np.abs(saabas_valuesA).mean(0)
np.abs(saabas_valuesB).mean(0)
tmp = modelA.get_score(importance_type="weight")
splitsA_fever = tmp["f0"]
splitsA_cough = tmp["f1"]
splitsA_fever,splitsA_cough
tmp = modelB.get_score(importance_type="weight")
splitsB_fever = tmp["f0"]
splitsB_cough = tmp["f1"]
splitsB_fever,splitsB_cough
tmp = modelA.get_score(importance_type="gain")
gainA_fever = tmp["f0"]*splitsA_fever
gainA_cough = tmp["f1"]*splitsA_cough
total = gainA_fever+gainA_cough
gainA_fever /= total / 100
gainA_cough /= total / 100
gainA_fever,gainA_cough
tmp["f0"]
tmp["f1"]
tmp = modelB.get_score(importance_type="gain")
gainB_fever = tmp["f0"] * splitsB_fever
gainB_cough = tmp["f1"] * splitsB_cough
total = gainB_fever + gainB_cough
gainB_fever /= total / 100
gainB_cough /= total / 100
gainB_fever, gainB_cough
tmp["f0"]*splitsB_fever/2000
tmp["f1"]*splitsB_cough
1250000.0/2000
(90+10+0+0)/4
((90-25)**2 + (10-25)**2 + (0-25)**2 + (0-25)**2)/4
((90-25)**2 + (10-25)**2 + (0-25)**2 + (0-25)**2)/4
((90-50)**2 + (10-50)**2 + (0-0)**2 + (0-0)**2)/4
def permute_importance(model, y):
    """Permutation importance of the two features on the global dataset ``X``.

    For each column (0 = fever, 1 = cough), repeatedly shuffles that column,
    predicts with ``model`` and records the mean absolute prediction error
    against ``y``.  A larger error means the model relied more on that
    feature.

    Returns:
    (fever_error, cough_error) -- mean of the per-shuffle error means.
    """
    def _mean_shuffled_error(col):
        # Shuffle a single column via a random index permutation; the other
        # column is left untouched.  (Identical logic was previously
        # duplicated once per column.)
        Xtmp = X.copy()
        inds = list(range(Xtmp.shape[0]))
        vals = []
        for i in range(1000):
            np.random.shuffle(inds)
            Xtmp[:, col] = Xtmp[inds, col]
            err = y - model.predict(xgb.DMatrix(Xtmp))
            # np.sqrt(err*err) is |err| elementwise, so this is a mean
            # absolute error rather than an RMSE.
            vals.append(np.mean(np.sqrt(err * err)))
        return np.mean(vals)

    return _mean_shuffled_error(0), _mean_shuffled_error(1)
permuteA_fever,permuteA_cough = permute_importance(modelA, yA)
permuteA_fever,permuteA_cough
permuteB_fever,permuteB_cough = permute_importance(modelB, yB)
permuteB_fever,permuteB_cough
modelA.get_score(importance_type="cover")
modelB.get_score(importance_type="cover")
# fever
f = pl.figure(figsize=(7,6))
pl.subplot(1,2,1)
d = 2
values_A = [
permuteA_fever,
splitsA_fever,
gainA_fever,
np.abs(shap_valuesA).mean(0)[0],
saabas_valuesA[0,0],
shap_valuesA[0,0]
]
display_A = [str(int(round(v))) for v in values_A]
display_A[2] = str(int(display_A[2]))+"%"
positions_A = [
1,
4,
7,
10,
13+d,
16+d
]
values_B = [
permuteA_cough,
splitsA_cough,
gainA_cough,
np.abs(shap_valuesA).mean(0)[1],
saabas_valuesA[0,1],
shap_valuesA[0,1]
]
display_B = [str(int(round(v))) for v in values_B]
display_B[2] = str(int(display_B[2]))+"%"
positions_B = [
0,
3,
6,
9,
12+d,
15+d
]
pl.barh(positions_A, values_A, color="#008BE0")
pl.barh(positions_B, values_B, color="#008BE0")
pl.yticks([])
pl.axis('off')
for i, v in enumerate(values_A):
pl.text(v + 3, positions_A[i]-0.25, str(display_A[i]), color='#008BE0', fontweight='bold')
for i, v in enumerate(values_B):
pl.text(v + 3, positions_B[i]-0.25, str(display_B[i]), color='#008BE0', fontweight='bold')
# cough
pl.subplot(1,2,2)
d = 2
values_A = [
permuteB_fever,
splitsB_fever,
gainB_fever,
np.abs(shap_valuesB).mean(0)[0],
saabas_valuesB[0,0],
shap_valuesB[0,0]
]
display_A = [str(int(round(v))) for v in values_A]
display_A[2] = display_A[2]+"%"
positions_A = [
1,
4,
7,
10,
13+d,
16+d
]
values_B = [
permuteB_cough,
splitsB_cough,
gainB_cough,
np.abs(shap_valuesB).mean(0)[1],
saabas_valuesB[0,1],
shap_valuesB[0,1]
]
display_B = [str(int(round(v))) for v in values_B]
display_B[2] = str(int(display_B[2]))+"%"
positions_B = [
0,
3,
6,
9,
12+d,
15+d
]
pl.barh(positions_A, values_A, color="#FF165A")
pl.barh(positions_B, values_B, color="#FF165A")
pl.yticks([])
pl.axis('off')
for i, v in enumerate(values_A):
pl.text(v + 3, positions_A[i]-0.25, str(display_A[i]), color='#FF165A', fontweight='bold')
for i, v in enumerate(values_B):
pl.text(v + 3, positions_B[i]-0.25, str(display_B[i]), color='#FF165A', fontweight='bold')
pl.show()
#pl.savefig("data/bar.pdf")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: 1) How does gradient checking work?
Step4: Expected Output
Step6: Expected Output
Step8: Expected Output
Step10: Now, run backward propagation.
Step12: You obtained some results on the fraud detection test set but you are not 100% sure of your model. Nobody's perfect! Let's implement gradient checking to verify if your gradients are correct.
Step14: Expected output
|
<ASSISTANT_TASK:>
Python Code:
# Packages
import numpy as np
from testCases import *
from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector
# GRADED FUNCTION: forward_propagation
def forward_propagation(x, theta):
    """Implement linear forward propagation: J(theta) = theta * x.

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well

    Returns:
    J -- the value of function J, computed using the formula J(theta) = theta * x
    """
    # (Original docstring lacked its triple quotes, making the block invalid
    # Python; restored here.)
    J = theta * x
    return J
x, theta = 2, 4
J = forward_propagation(x, theta)
print ("J = " + str(J))
# GRADED FUNCTION: backward_propagation
def backward_propagation(x, theta):
    """Compute dJ/dtheta for J(theta) = theta * x, which is simply x.

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well

    Returns:
    dtheta -- the gradient of the cost with respect to theta
    """
    # (Original docstring lacked its triple quotes, making the block invalid
    # Python; restored here.)
    dtheta = x
    return dtheta
x, theta = 2, 4
dtheta = backward_propagation(x, theta)
print ("dtheta = " + str(dtheta))
# GRADED FUNCTION: gradient_check
def gradient_check(x, theta, epsilon = 1e-7):
    """Verify backward_propagation's gradient with a centered finite difference.

    Approximates dJ/dtheta as (J(theta+eps) - J(theta-eps)) / (2*eps) and
    compares it to the analytic gradient using a normalized difference, which
    should be tiny (below 1e-7) when backprop is correct.

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well
    epsilon -- tiny shift to the input to compute approximated gradient with formula(1)

    Returns:
    difference -- difference (2) between the approximated gradient and the backward propagation gradient
    """
    # Compute gradapprox using left side of formula (1). epsilon is small enough, you don't need to worry about the limit.
    ### START CODE HERE ### (approx. 5 lines)
    thetaplus = theta + epsilon                               # Step 1
    thetaminus = theta - epsilon                              # Step 2
    J_plus = forward_propagation(x, thetaplus)                # Step 3
    J_minus = forward_propagation(x, thetaminus)              # Step 4
    gradapprox = (J_plus - J_minus) / (2 * epsilon)           # Step 5
    ### END CODE HERE ###

    # Check if gradapprox is close enough to the output of backward_propagation()
    ### START CODE HERE ### (approx. 1 line)
    grad = backward_propagation(x, theta)
    ### END CODE HERE ###

    # Normalized difference: ||grad - gradapprox|| / (||grad|| + ||gradapprox||).
    ### START CODE HERE ### (approx. 1 line)
    numerator = np.linalg.norm(grad - gradapprox)             # Step 1'
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)  # Step 2'
    difference = numerator / denominator                      # Step 3'
    ### END CODE HERE ###

    if difference < 1e-7:
        print ("The gradient is correct!")
    else:
        print ("The gradient is wrong!")

    return difference
x, theta = 2, 4
difference = gradient_check(x, theta)
print("difference = " + str(difference))
def forward_propagation_n(X, Y, parameters):
    """Implement forward propagation (and compute the cost) for the 3-layer net.

    Arguments:
    X -- training set for m examples
    Y -- labels for m examples
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
                    W1 -- weight matrix of shape (5, 4)
                    b1 -- bias vector of shape (5, 1)
                    W2 -- weight matrix of shape (3, 5)
                    b2 -- bias vector of shape (3, 1)
                    W3 -- weight matrix of shape (1, 3)
                    b3 -- bias vector of shape (1, 1)

    Returns:
    cost -- the cost function (logistic cost for one example)
    cache -- tuple of intermediate values needed by backward propagation
    """
    # retrieve parameters
    m = X.shape[1]
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    # Cross-entropy cost averaged over the m examples.
    logprobs = np.multiply(-np.log(A3),Y) + np.multiply(-np.log(1 - A3), 1 - Y)
    cost = 1./m * np.sum(logprobs)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)

    return cost, cache
def backward_propagation_n(X, Y, cache):
    """Implement the backward propagation presented in figure 2.

    Arguments:
    X -- input datapoint, of shape (input size, 1)
    Y -- true "label"
    cache -- cache output from forward_propagation_n()

    Returns:
    gradients -- A dictionary with the gradients of the cost with respect to each parameter, activation and pre-activation variables.
    """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache

    dZ3 = A3 - Y
    dW3 = 1./m * np.dot(dZ3, A2.T)
    db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)

    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))
    # NOTE: the extra "* 2" below is a deliberate bug planted for the
    # gradient-checking exercise; the corrected version later in this
    # notebook uses plain 1./m * np.dot(dZ2, A1.T).
    dW2 = 1./m * np.dot(dZ2, A1.T) * 2
    db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)

    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    dW1 = 1./m * np.dot(dZ1, X.T)
    # NOTE: "4./m" (instead of 1./m) is the second deliberate bug that
    # gradient_check_n is meant to catch.
    db1 = 4./m * np.sum(dZ1, axis=1, keepdims = True)

    gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
                 "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
                 "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}

    return gradients
# GRADED FUNCTION: gradient_check_n
def gradient_check_n(parameters, gradients, X, Y, epsilon = 1e-7):
    """Check backward_propagation_n's gradients against numerical estimates.

    For every scalar parameter theta_i, perturbs it by +/- epsilon, recomputes
    the cost with forward_propagation_n, and forms the centered-difference
    approximation (J_plus[i] - J_minus[i]) / (2 * epsilon).  The resulting
    approximate gradient vector is compared to the backprop gradient with a
    normalized difference.

    Arguments:
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
    grad -- output of backward_propagation_n, contains gradients of the cost with respect to the parameters.
    x -- input datapoint, of shape (input size, 1)
    y -- true "label"
    epsilon -- tiny shift to the input to compute approximated gradient with formula(1)

    Returns:
    difference -- difference (2) between the approximated gradient and the backward propagation gradient
    """
    # Set-up variables
    parameters_values, _ = dictionary_to_vector(parameters)
    grad = gradients_to_vector(gradients)
    num_parameters = parameters_values.shape[0]
    J_plus = np.zeros((num_parameters, 1))
    J_minus = np.zeros((num_parameters, 1))
    gradapprox = np.zeros((num_parameters, 1))

    # Compute gradapprox, one parameter component at a time.
    for i in range(num_parameters):
        # Compute J_plus[i]. Inputs: "parameters_values, epsilon". Output = "J_plus[i]".
        # "_" is used because the function you have to outputs two parameters but we only care about the first one
        ### START CODE HERE ### (approx. 3 lines)
        thetaplus = np.copy(parameters_values)                                        # Step 1
        thetaplus[i][0] = thetaplus[i][0] + epsilon                                   # Step 2
        J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus))   # Step 3
        ### END CODE HERE ###

        # Compute J_minus[i]. Inputs: "parameters_values, epsilon". Output = "J_minus[i]".
        ### START CODE HERE ### (approx. 3 lines)
        thetaminus = np.copy(parameters_values)                                       # Step 1
        thetaminus[i][0] = thetaminus[i][0] - epsilon                                 # Step 2
        J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus)) # Step 3
        ### END CODE HERE ###

        # Compute gradapprox[i]
        ### START CODE HERE ### (approx. 1 line)
        gradapprox[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)
        ### END CODE HERE ###

    # Compare gradapprox to backward propagation gradients by computing difference.
    ### START CODE HERE ### (approx. 1 line)
    numerator = np.linalg.norm(grad - gradapprox)                                     # Step 1'
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)                   # Step 2'
    difference = numerator / denominator                                              # Step 3'
    ### END CODE HERE ###

    if difference > 1e-7:
        print ("\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) + "\033[0m")
    else:
        print ("\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) + "\033[0m")

    return difference
X, Y, parameters = gradient_check_n_test_case()
cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)
def backward_propagation_n(X, Y, cache):
    """Implement the backward propagation presented in figure 2 (corrected).

    This is the fixed version: the earlier definition contained deliberate
    bugs ("* 2" on dW2 and "4./m" on db1) for the gradient-checking exercise.

    Arguments:
    X -- input datapoint, of shape (input size, number of examples)
    Y -- true "label"
    cache -- cache output from forward_propagation_n()

    Returns:
    gradients -- A dictionary with the gradients of the cost with respect to
                 each parameter, activation and pre-activation variable.
    """
    # (Original docstring lacked its triple quotes, making the block invalid
    # Python; restored here.)
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache

    # Output layer (sigmoid + cross-entropy): dZ3 = A3 - Y.
    dZ3 = A3 - Y
    dW3 = 1./m * np.dot(dZ3, A2.T)
    db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)

    # Hidden layer 2 (ReLU): gradient flows only where the activation was positive.
    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))
    dW2 = 1./m * np.dot(dZ2, A1.T)
    db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)

    # Hidden layer 1 (ReLU).
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    dW1 = 1./m * np.dot(dZ1, X.T)
    db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)

    gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
                 "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
                 "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}

    return gradients
X, Y, parameters = gradient_check_n_test_case()
cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Merging Dataframes Using pd.merge()
Step2: Merging Dataframes
Step3: Similary, you can merge the other dimension tables - shipping_df and orders_df to create a master_df and perform indexing using any column in the master dataframe.
Step4: Similary, you can perform left, right and outer merges (joins) by using the argument how = 'left' / 'right' / 'outer'.
Step5: Concatenating Dataframes Having the Same Rows
Step6: Note that you can also use the pd.concat() method to merge dataframes using common keys, though here we will not discuss that. For simplicity, we have used the pd.merge() method for database-style merging and pd.concat() for appending dataframes having no common columns.
Step7: Notice that there are a lot of NaN values. This is because some teams which played in IPL 2017 were not present in IPL 2018. In addition, there were also new teams present in IPL 2018. We can handle these NaN values by using df.add() instead of the simple add operator. Let's see how.
Step8: Also notice how the resultant dataframe is sorted by the index, i.e. 'IPL Team' alphabetically.
|
<ASSISTANT_TASK:>
Python Code:
# loading libraries and reading the data
import numpy as np
import pandas as pd
market_df = pd.read_csv("./global_sales_data/market_fact.csv")
customer_df = pd.read_csv("./global_sales_data/cust_dimen.csv")
product_df = pd.read_csv("./global_sales_data/prod_dimen.csv")
shipping_df = pd.read_csv("./global_sales_data/shipping_dimen.csv")
orders_df = pd.read_csv("./global_sales_data/orders_dimen.csv")
# Already familiar with market data: Each row is an order
market_df.head()
# Customer dimension table: Each row contains metadata about customers
customer_df.head()
# Product dimension table
product_df.head()
# Shipping metadata
shipping_df.head()
# Orders dimension table
orders_df.head()
# Merging the dataframes
# Note that Cust_id is the common column/key, which is provided to the 'on' argument
# how = 'inner' makes sure that only the customer ids present in both dfs are included in the result
df_1 = pd.merge(market_df, customer_df, how='inner', on='Cust_id')
df_1.head()
# Now, you can subset the orders made by customers from 'Corporate' segment
df_1.loc[df_1['Customer_Segment'] == 'CORPORATE', :]
# Example 2: Select all orders from product category = office supplies and from the corporate segment
# We now need to merge the product_df
df_2 = pd.merge(df_1, product_df, how='inner', on='Prod_id')
df_2.head()
# Select all orders from product category = office supplies and from the corporate segment
df_2.loc[(df_2['Product_Category']=='OFFICE SUPPLIES') & (df_2['Customer_Segment']=='CORPORATE'),:]
# Merging shipping_df
df_3 = pd.merge(df_2, shipping_df, how='inner', on='Ship_id')
df_3.shape
# Merging the orders table to create a master df
master_df = pd.merge(df_3, orders_df, how='inner', on='Ord_id')
master_df.shape
master_df.head()
# dataframes having the same columns
df1 = pd.DataFrame({'Name': ['Aman', 'Joy', 'Rashmi', 'Saif'],
'Age': ['34', '31', '22', '33'],
'Gender': ['M', 'M', 'F', 'M']}
)
df2 = pd.DataFrame({'Name': ['Akhil', 'Asha', 'Preeti'],
'Age': ['31', '22', '23'],
'Gender': ['M', 'F', 'F']}
)
df1
df2
# To concatenate them, one on top of the other, you can use pd.concat
# The first argument is a sequence (list) of dataframes
# axis = 0 indicates that we want to concat along the row axis
pd.concat([df1, df2], axis = 0)
# A useful and intuitive alternative to concat along the rows is the append() function
# It concatenates along the rows
df1.append(df2)
df1 = pd.DataFrame({'Name': ['Aman', 'Joy', 'Rashmi', 'Saif'],
'Age': ['34', '31', '22', '33'],
'Gender': ['M', 'M', 'F', 'M']}
)
df1
df2 = pd.DataFrame({'School': ['RK Public', 'JSP', 'Carmel Convent', 'St. Paul'],
'Graduation Marks': ['84', '89', '76', '91']}
)
df2
# To join the two dataframes, use axis = 1 to indicate joining along the columns axis
# The join is possible because the corresponding rows have the same indices
pd.concat([df1, df2], axis = 1)
# Teamwise stats for IPL 2018
IPL_2018 = pd.DataFrame({'IPL Team': ['CSK', 'SRH', 'KKR', 'RR', 'MI', 'RCB', 'KXIP', 'DD'],
'Matches Played': [16, 17, 16, 15, 14, 14, 14, 14],
'Matches Won': [11, 10, 9, 7, 6, 6, 6, 5]}
)
# Set the 'IPL Team' column as the index to perform arithmetic operations on the other rows using the team as reference
IPL_2018.set_index('IPL Team', inplace = True)
IPL_2018
# Similarly, we have the stats for IPL 2017
IPL_2017 = pd.DataFrame({'IPL Team': ['MI', 'RPS', 'KKR', 'SRH', 'KXIP', 'DD', 'GL', 'RCB'],
'Matches Played': [17, 16, 16, 15, 14, 14, 14, 14],
'Matches Won': [12, 10, 9, 8, 7, 6, 4, 3]}
)
IPL_2017.set_index('IPL Team', inplace = True)
IPL_2017
# Simply add the two DFs using the add opearator
Total = IPL_2018 + IPL_2017
Total
# The fill_value argument inside the df.add() function replaces all the NaN values in the two dataframes w.r.t. each other with zero.
Total = IPL_2018.add(IPL_2017, fill_value = 0)
Total
# Creating a new column - 'Win Percentage'
Total['Win Percentage'] = Total['Matches Won']/Total['Matches Played']
Total
# Sorting to determine the teams with most number of wins. If the number of wins of two teams are the same, sort by the win percentage.
Total.sort_values(by = (['Matches Won', 'Win Percentage']), ascending = False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You may have to set up your $CSCRATCH environment variable so that Python can find it, e.g.
Step2: These are some circular regions that could be masked, but let's check the capability also for elliptical regions
Step3: Note that the ellipticity components, here, which are defined at the bottom of the page at, e.g., http
Step4: Creating a file of targets and masking at the command line
Step5: Masking targets
Step6: Let's plot which objects are in masks and which are not, against the backdrop of the mask (in a small region of the sky)
Step7: Note that the BADSKY locations are just outside the perimeter of the masks, and are quite obvious in the plot.
Step8: Now let's mask that random catalog
Step9: and plot the random points that are and are not in the mask, both for the IN_RADIUS and the NEAR_RADIUS
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import os
import numpy as np
import fitsio
from desitarget import desi_mask, brightmask
os.environ["CSCRATCH"] = '/global/cscratch1/sd/adamyers'
sourcemask = fitsio.read("$CSCRATCH/sourcemask150.fits")
brightmask.plot_mask(sourcemask,limits=[151,150,1,2])
brightmask.plot_mask(sourcemask,limits=[151,150,1,2],radius="NEAR_RADIUS")
from desitarget.brightmask import _rexlike
from desitarget.cuts import _psflike
rex_or_psf = _rexlike(sourcemask["TYPE"]) | _psflike(sourcemask["TYPE"])
wcircle = np.where(rex_or_psf)
wellipse = np.where(~rex_or_psf)
sourcemask[wcircle][20:25]
sourcemask[wellipse][20:25]
brightmask.plot_mask(sourcemask,limits=[155.1,154.8,19.7,20.0],radius="NEAR_RADIUS")
targs = fitsio.read("$CSCRATCH/targs150.fits")
print(len(targs))
print(len(np.where( (targs["DESI_TARGET"] & desi_mask.BADSKY) != 0 )[0]))
targs = brightmask.append_safe_targets(targs,sourcemask)
print(len(targs))
print(len(np.where( (targs["DESI_TARGET"] & desi_mask.BADSKY) != 0 )[0]))
w = np.where( (targs["DESI_TARGET"] & desi_mask.BADSKY) != 0 )
badskies= targs[w]
brightmask.plot_mask(sourcemask,show=False)
plt.axis([155.1,154.8,19.7,20.0])
plt.plot(badskies["RA"],badskies["DEC"],'k,')
plt.xlabel('RA (o)')
plt.ylabel('Dec (o)')
plt.show()
desi_mask
dt = brightmask.set_target_bits(targs,sourcemask)
inmask = np.where( (dt & desi_mask.IN_BRIGHT_OBJECT) != 0)
masked = targs[inmask]
notinmask = np.where( (dt & desi_mask.IN_BRIGHT_OBJECT) == 0)
unmasked = targs[notinmask]
brightmask.plot_mask(sourcemask,show=False)
plt.axis([155.1,154.8,19.7,20.0])
plt.xlabel('RA (o)')
plt.ylabel('Dec (o)')
plt.plot(masked["RA"],masked["DEC"],'kx')
plt.plot(unmasked["RA"],unmasked["DEC"],'r.')
plt.show()
from numpy.random import random
Nran = 100000
rancat = np.zeros(Nran, dtype=[('RA', '>f8'), ('DEC', '>f8')])
rancat["RA"] = 154.8+0.3*(random(Nran))
rancat["DEC"] = np.degrees(np.arcsin(np.sin(np.radians(20))-random(Nran)*0.05))
inmask, nearmask = brightmask.is_in_bright_mask(rancat,sourcemask)
masked = rancat[np.where(inmask)]
notmasked = rancat[np.where(~inmask)]
near = rancat[np.where(nearmask)]
notnear = rancat[np.where(~nearmask)]
brightmask.plot_mask(sourcemask,show=False)
plt.axis([155.1,154.8,19.7,20.0])
plt.xlabel('RA (o)')
plt.ylabel('Dec (o)')
plt.plot(masked["RA"],masked["DEC"],'r.')
plt.plot(notmasked["RA"],notmasked["DEC"],'g,')
plt.show()
brightmask.plot_mask(sourcemask,show=False,radius="NEAR_RADIUS")
plt.axis([155.1,154.8,19.7,20.0])
plt.xlabel('RA (o)')
plt.ylabel('Dec (o)')
plt.plot(near["RA"],near["DEC"],'r.')
plt.plot(notnear["RA"],notnear["DEC"],'g,')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Bermuda Weather Radar
|
<ASSISTANT_TASK:>
Python Code:
data.pressure[-1*24*24:].plot()
# See how this compares to "normal" pressure
# Plot the last 10 days
data.pressure[-10*24*24:].plot()
data.tail()
!pwd
from IPython import display
display.Image('../galleries/Joaquin/joaquin.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exposé
Step2: Données départements
Step3: Il faudrait aussi fusionner avec la population de chaque département. Ce sera pour une autre fois.
Step4: On enlève tous les départements à trois chiffres.
Step5: Carte COVID
Step6: Les régions les plus peuplées ont sans doute la plus grande capacité hospitalière. Il faudrait diviser par cette capacité pour avoir une carte qui ait un peu plus de sens. Comme l'idée est ici de simplement tracer la carte, on ne calculera pas de ratio.
Step7: La création de carte a toujours été plus ou moins compliqué. Les premiers notebooks que j'ai créés sur le sujet étaient beaucoup plus complexe. geopandas a simplifié les choses. Son développement a commencé entre 2013 et a bien évolué depuis. Et j'ai dû passer quelques heures à récupérer les contours des départements il y a cinq ans.
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
%matplotlib inline
# https://www.data.gouv.fr/fr/datasets/donnees-hospitalieres-relatives-a-lepidemie-de-covid-19/
from pandas import read_csv
url = "https://www.data.gouv.fr/fr/datasets/r/63352e38-d353-4b54-bfd1-f1b3ee1cabd7"
covid = read_csv(url, sep=";")
covid.tail()
last_day = covid.loc[covid.index[-1], "jour"]
last_day
last_data = covid[covid.jour == last_day].groupby("dep").sum()
last_data.shape
last_data.describe()
last_data.head()
last_data.tail()
import geopandas
# dernier lien de la page (format shapefiles)
url = "https://www.data.gouv.fr/en/datasets/r/ed02b655-4307-4db4-b1ca-7939145dc20f"
geo = geopandas.read_file(url)
geo.tail()
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1, figsize=(5, 4))
geo.plot(ax=ax, color='white', edgecolor='black');
codes = [_ for _ in set(geo.code_depart) if len(_) < 3]
metropole = geo[geo.code_depart.isin(codes)]
metropole.tail()
fig, ax = plt.subplots(1, 1, figsize=(5, 4))
metropole.plot(ax=ax, color='white', edgecolor='black')
ax.set_title("%s départements" % metropole.shape[0]);
merged = last_data.reset_index(drop=False).merge(metropole, left_on="dep", right_on="code_depart")
merged.shape
merged.tail()
fig, ax = plt.subplots(1, 1, figsize=(5, 4))
merged.hist('rea', bins=20, ax=ax)
ax.set_title("Distribution rea");
merged.sort_values('rea').tail()
geomerged = geopandas.GeoDataFrame(merged)
from mpl_toolkits.axes_grid1 import make_axes_locatable
fig, ax = plt.subplots(1, 1)
# ligne à ajouter pour avoir une légende ajustée à la taille du graphe
cax = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.1)
geomerged.plot(column="rea", ax=ax, edgecolor='black', legend=True, cax=cax)
ax.set_title("Réanimations pour les %d départements" % metropole.shape[0]);
capacite = covid.groupby(["jour", "dep"]).sum().groupby("dep").max()
capacite.head()
capa_merged = merged.merge(capacite, left_on="dep", right_on="dep")
capa_merged["occupation"] = capa_merged["rea_x"] / capa_merged["rea_y"]
capa_merged.head(n=2).T
geocapa = geopandas.GeoDataFrame(capa_merged)
fig, ax = plt.subplots(1, 1)
# ligne à ajouter pour avoir une légende ajustée à la taille du graphe
cax = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.1)
geocapa.plot(column="occupation", ax=ax, edgecolor='black', legend=True, cax=cax)
ax.set_title("Occupations en réanimations pour les %d départements" % metropole.shape[0]);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download the NuSTAR TLE archive.
Step2: Here is where we define the observing window that we want to use.
Step3: We want to know how to orient NuSTAR for the Sun.
Step4: Set up the offset you want to use here
Step5: Loop over each orbit and correct the pointing for the same heliocentric pointing position.
Step6: Sanity check
|
<ASSISTANT_TASK:>
Python Code:
fname = io.download_occultation_times(outdir='../data/')
print(fname)
tlefile = io.download_tle(outdir='../data')
print(tlefile)
times, line1, line2 = io.read_tle_file(tlefile)
tstart = '2017-09-11T00:00:00'
tend = '2017-09-15T00:00:00'
orbits = planning.sunlight_periods(fname, tstart, tend)
pa = planning.get_nustar_roll(tstart, 0)
print("NuSTAR Roll angle for Det0 in NE quadrant: {}".format(pa))
offset = [1100., -400.]*u.arcsec
for ind, orbit in enumerate(orbits):
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sky_pos = planning.get_sky_position(midTime, offset)
# print("Orbit: {}".format(ind))
print("Orbit start: {} Orbit end: {}".format(orbit[0].isoformat(), orbit[1].isoformat()))
print("Aim Time: {}".format(midTime.isoformat()))
print('Aim time: {} RA (deg): {} Dec (deg): {}'.format(midTime.isoformat(), sky_pos[0], sky_pos[1]))
print("")
aim_time = '2016-07-26T19:53:15.00'
offset = [1000, 150]*u.arcsec
sky_pos = planning.get_sky_position(aim_time, offset)
print(sky_pos)
np = planning.get_nustar_roll(aim_time, 0)
print(np)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: defines the folder where the JSON files are (for the datasets) and where to save the proxy JSON files
Step2: instantiates a proxy instance
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.insert(0, '../')
from paleopy import proxy
from paleopy import analogs
from paleopy.plotting import scalar_plot
djsons = '../jsons/'
pjsons = '../jsons/proxies'
proxies = pd.read_excel('../data/ProxiesLIANZSWP.xlsx')
proxies.head()
for irow in proxies.index:
p = proxy(sitename=proxies.loc[irow,'Site'], \
lon = proxies.loc[irow,'Long'], \
lat = proxies.loc[irow,'Lat'], \
djsons = djsons, \
pjsons = pjsons, \
pfname = '{}.json'.format(proxies.loc[irow,'Site']), \
dataset = proxies.loc[irow,'dataset'], \
variable =proxies.loc[irow,'variable'], \
measurement ='delta O18', \
dating_convention = 'absolute', \
calendar = 'gregorian',\
chronology = 'historic', \
season = 'DJF', \
value = proxies.loc[irow,'Anom'], \
qualitative = 0, \
calc_anoms = 1, \
detrend = 1, \
method = 'quintiles')
p.find_analogs()
p.proxy_repr(pprint=True, outfile=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read the Gene Ontology
Step2: Lookup node properties
Step3: Create name mappings
Step4: Find parent or child relationships
Step5: Find all superterms of myelination
Step6: Find all subterms of myelination
Step7: Find all paths to the root
Step8: See the ontology metadata
Step9: Create a dictionary of obsolete terms to their replacements
|
<ASSISTANT_TASK:>
Python Code:
import networkx
import obonet
%%time
url = 'http://purl.obolibrary.org/obo/go/go-basic.obo'
graph = obonet.read_obo(url)
# Number of nodes
len(graph)
# Number of edges
graph.number_of_edges()
# Check if the ontology is a DAG
networkx.is_directed_acyclic_graph(graph)
# Retreive properties of phagocytosis
graph.nodes['GO:0006909']
# Retreive properties of pilus shaft
graph.nodes['GO:0009418']
id_to_name = {id_: data.get('name') for id_, data in graph.nodes(data=True)}
name_to_id = {data['name']: id_ for id_, data in graph.nodes(data=True) if 'name' in data}
# Get the name for GO:0042552
id_to_name['GO:0042552']
# Get the id for myelination
name_to_id['myelination']
# Find edges to parent terms
node = name_to_id['pilus']
for child, parent, key in graph.out_edges(node, keys=True):
print(f'• {id_to_name[child]} ⟶ {key} ⟶ {id_to_name[parent]}')
# Find edges to children terms
node = name_to_id['pilus']
for parent, child, key in graph.in_edges(node, keys=True):
print(f'• {id_to_name[child]} ⟵ {key} ⟵ {id_to_name[parent]}')
sorted(id_to_name[superterm] for superterm in networkx.descendants(graph, 'GO:0042552'))
sorted(id_to_name[subterm] for subterm in networkx.ancestors(graph, 'GO:0042552'))
paths = networkx.all_simple_paths(
graph,
source=name_to_id['starch binding'],
target=name_to_id['molecular_function']
)
for path in paths:
print('•', ' ⟶ '.join(id_to_name[node] for node in path))
graph.graph
graph_with_obs = obonet.read_obo(url, ignore_obsolete=False)
len(graph_with_obs)
old_to_new = dict()
for node, data in graph_with_obs.nodes(data=True):
for replaced_by in data.get("replaced_by", []):
old_to_new[node] = replaced_by
list(old_to_new.items())[:5]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What are artifacts?
Step2: Low-frequency drifts
Step3: Low-frequency drifts are readily removed by high-pass filtering at a fairly
Step4: Here we see narrow frequency peaks at 60, 120, 180, and 240 Hz — the power
Step5: The horizontal streaks in the magnetometer image plot reflect the fact that
Step6: Here again we can visualize the spatial pattern of the associated field at
Step7: Or, we can get an ERP/F plot with
Step8: Ocular artifacts (EOG)
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(0, 60).load_data() # just use a fraction of data for speed here
ssp_projectors = raw.info['projs']
raw.del_proj()
mag_channels = mne.pick_types(raw.info, meg='mag')
raw.plot(duration=60, order=mag_channels, n_channels=len(mag_channels),
remove_dc=False)
fig = raw.plot_psd(tmax=np.inf, fmax=250, average=True)
# add some arrows at 60 Hz and its harmonics:
for ax in fig.axes[1:]:
freqs = ax.lines[-1].get_xdata()
psds = ax.lines[-1].get_ydata()
for freq in (60, 120, 180, 240):
idx = np.searchsorted(freqs, freq)
ax.arrow(x=freqs[idx], y=psds[idx] + 18, dx=0, dy=-12, color='red',
width=0.1, head_width=3, length_includes_head=True)
ecg_epochs = mne.preprocessing.create_ecg_epochs(raw)
ecg_epochs.plot_image(combine='mean')
avg_ecg_epochs = ecg_epochs.average().apply_baseline((-0.5, -0.2))
avg_ecg_epochs.plot_topomap(times=np.linspace(-0.05, 0.05, 11))
avg_ecg_epochs.plot_joint(times=[-0.25, -0.025, 0, 0.025, 0.25])
eog_epochs = mne.preprocessing.create_eog_epochs(raw, baseline=(-0.5, -0.2))
eog_epochs.plot_image(combine='mean')
eog_epochs.average().plot_joint()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Macierz $A$ dla regresji liniowej wynosi
Step2: Współczynniki dokładnie będą wynosiły
Step3: Optymalizacja metodą iteracyjną,
Step4: Tensor flow - gradient descend
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
import numpy as np
import matplotlib.pyplot as plt
learning_rate = 0.01
training_epochs = 1000
display_step = 50
train_X = np.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = np.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]
import numpy as np
M = np.vstack([np.ones_like(train_X),train_X]).T
M
print (np.dot(M.T,M))
print(np.dot(M.T,train_Y))
c = np.linalg.solve(np.dot(M.T,M),np.dot(M.T,train_Y))
c
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, c[1] * train_X + c[0], label='Fitted line')
plt.legend()
plt.close()
from scipy.optimize import minimize
def cost(c, x=train_X, y=train_Y):
    """Sum of squared residuals of the line y = c[0] + c[1]*x over the (x, y) pairs."""
    intercept, slope = c[0], c[1]
    total = 0
    for xi, yi in zip(x, y):
        total += (intercept + slope * xi - yi) ** 2
    return total
cost([1,2])
res = minimize(cost, [1,1], method='nelder-mead', options={'xtol': 1e-8, 'disp': True})
res.x
x = np.linspace(-2,2,77)
y = np.linspace(-2,2,77)
X,Y = np.meshgrid(x,y)
cost([X,Y]).shape
plt.contourf( X,Y,np.log(cost([X,Y])),cmap='gray')
plt.plot(res.x[0],res.x[1],'o')
np.min(cost([X,Y]))
px=[]
py=[]
for i in range(20):
res = minimize(cost, [1,1], options={ 'maxiter':i})
px.append(res.x[0])
py.append(res.x[1])
print(res.x)
plt.plot(px,py,'ro-')
import sympy
from sympy.abc import x,y
sympy.init_printing(use_latex='mathjax')
f_symb = cost([x,y]).expand()
f_symb.diff(x)
F = sympy.lambdify((x,y),f_symb,np)
Fx = sympy.lambdify((x,y),f_symb.diff(x),np)
Fy = sympy.lambdify((x,y),f_symb.diff(y),np)
F(1,1),cost([1,1])
x0,y0 = -1,1
h = 0.01/(2*17)
for i in range(500):
plt.plot(x0,y0,'go')
#print(i,x0,y0)
x0 += -h * Fx(x0,y0)
y0 += -h * Fy(x0,y0)
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Set model weights
W = tf.Variable(1.0, name="weight")
b = tf.Variable(1.0, name="bias")
# Construct a linear model
pred = tf.add(tf.multiply(X, W), b)
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
# TEST
with tf.Session() as sess:
sess.run(init)
sess.run(tf.assign(W,1.0))
sess.run(tf.assign(b,2.0))
print(sess.run(b),sess.run(cost, feed_dict={X: train_X, Y: train_Y}))
# Launch the graph
x_tf_lst = []
y_tf_lst = []
with tf.Session() as sess:
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
#Display logs per epoch step
if (epoch+1) % display_step == 0:
c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
print ("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
"W=", sess.run(W), "b=", sess.run(b))
x_tf_lst.append(sess.run(b))
y_tf_lst.append(sess.run(W))
training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
print ("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
plt.plot(x_tf_lst,y_tf_lst,'yo')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 2
Step2: C
Step3: D
Step5: <a id='prob1ans'></a>
|
<ASSISTANT_TASK:>
Python Code:
# To begin, define the prior as the probability of the car being behind door i (i=1,2,3), call this "pi".
# Note that pi is uniformly distributed.
p1 = ?
p2 = ?
p3 = ?
# Next, to define the class conditional, we need three pieces of information. Supposing Monty reveals door 3,
# we must find:
# probability that Monty reveals door 3 given door 3 wins (call this c3)
# probability that Monty reveals door 3 given door 2 wins (call this c2)
# probability that Monty reveals door 3 given door 1 wins (call this c1)
#
# For this, suppose you initially choose door 1.
c3 = ?
c2 = ?
c1 = ?
#Now we need find the marginal for the choice of Monty, call this pd3. Hint: use the sum rule of probability and
# your previous calculations.
pd3 = ?
## Express all answers within this cell as a percentage
# The probability of winning if you stay with door 1 is:
print("Door 1: %(switch1).2f %%" %{"switch1":?})
# Finally, Bayes' rule tells us the probability of winning if you switch to door 2 is:
print("Door 2: %(switch2).2f %%" %{"switch2":?})
# The probability of winning if you switch to door 3 is:
print("Door 3: %(switch3).2f %%" %{"switch3":?})
# Distribution 1
p1Plus = ?
p1Minus = ?
# Distribution 2
p2Plus = ?
p2Minus = ?
# Class-conditional probabilities
pBplus = ?
pBminus = ?
pSplus = ?
pSminus = ?
#Start a section for the results under prior 1
scores1=[(pBplus*pSplus*p1Plus,'+'),(pBminus*pSminus*p1Minus,'-')]
class1 = list(max(scores1))
#Beginning of results
print('\033[1m'+"Results under prior 1" + '\033[0m')
# Posterior score for + under prior 1
print("Posterior score for + under prior 1 is $ %(postPlus).2f" %{"postPlus":scores1[0][0]})
# Posterior score for - under prior 1
print("Posterior score for - under prior 1 is $ %(postMinus).2f" %{"postMinus":scores1[1][0]})
# Classification under prior 1
print("The object is then of class %s" %class1[1])
#Start a section for the results under prior 2
scores2=[(pBplus*pSplus*p2Plus,'+'),(pBminus*pSminus*p2Minus,'-')]
class2 = list(max(scores2))
#Beginning of results
print('\033[1m'+"Results under prior 2" + '\033[0m')
# Posterior score for + under prior 2
print("Posterior score for + under prior 2 is $ %(postPlus).2f" %{"postPlus":scores2[0][0]})
# Posterior score for - under prior 2
print("Posterior score for - under prior 2 is $ %(postMinus).2f" %{"postMinus":scores2[1][0]})
# Classification under prior 2
print("The object is then of class %s" %class2[1])
from IPython.core.display import HTML
HTML(
<style>
.MathJax nobr>span.math>span{border-left-width:0 !important};
</style>
)
from IPython.display import Image
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the dataset 0750-0805
Step2: What is the number of different vehicles for the 15 min
Step3: 15min = 900 s = 9000 ms //
Step4: For every time stamp, check how many vehicles are accelerating while the vehicle behind them is (or is not) also accelerating...
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from pandas import Series, DataFrame
import pandas as pd
from itertools import *
import numpy as np
import csv
import math
import matplotlib.pyplot as plt
from matplotlib import pylab
from scipy.signal import hilbert, chirp
import scipy
import networkx as nx
c_dataset = ['vID','fID', 'tF', 'Time', 'lX', 'lY', 'gX', 'gY', 'vLen', 'vWid', 'vType','vVel', 'vAcc', 'vLane', 'vPrec', 'vFoll', 'spac','headway' ]
dataset = pd.read_table('D:\\zzzLola\\PhD\\DataSet\\US101\\coding\\trajectories-0750am-0805am.txt', sep=r"\s+",
header=None, names=c_dataset)
dataset[:10]
numV = dataset['vID'].unique()
len(numV)
numTS = dataset['Time'].unique()
len(numTS)
#Converting to meters
dataset['lX'] = dataset.lX * 0.3048
dataset['lY'] = dataset.lY * 0.3048
dataset['gX'] = dataset.gX * 0.3048
dataset['gY'] = dataset.gY * 0.3048
dataset['vLen'] = dataset.vLen * 0.3048
dataset['vWid'] = dataset.vWid * 0.3048
dataset['spac'] = dataset.spac * 0.3048
dataset['vVel'] = dataset.vVel * 0.3048
dataset['vAcc'] = dataset.vAcc * 0.3048
dataset[:10]
dataset['tF'].describe()
des_all = dataset.describe()
des_all
des_all.to_csv('D:\\zzzLola\\PhD\\DataSet\\US101\\coding\\description_allDataset_160502.csv', sep='\t', encoding='utf-8')
dataset.to_csv('D:\\zzzLola\\PhD\\DataSet\\US101\\coding\\dataset_meters_160502.txt', sep='\t', encoding='utf-8',index=False)
#table.groupby('YEARMONTH').CLIENTCODE.nunique()
v_num_lanes = dataset.groupby('vID').vLane.nunique()
v_num_lanes[v_num_lanes > 1].count()
v_num_lanes[v_num_lanes == 1].count()
#Drop some field are not necessary for the time being.
dataset = dataset.drop(['fID','tF','lX','lY','vLen','vWid', 'vType','vVel', 'vAcc',
'vLane', 'vPrec', 'vFoll','spac','headway'], axis=1)
dataset[:10]
def save_graph(graph, file_name):
    """Render *graph* with a random node layout and save it as an image file.

    Parameters
    ----------
    graph : networkx graph
        The graph to draw.
    file_name : str
        Path passed to ``plt.savefig`` for the output image.
    """
    # Create one large figure and draw everything on it. (Creating a figure
    # with num=None and then calling plt.figure(1) can target two different
    # figures when a figure numbered 1 already exists, leaving axis('off')
    # applied to the wrong one.)
    fig = plt.figure(figsize=(20, 20), dpi=80)
    plt.axis('off')
    # random placement of nodes; nx.spring_layout is a possible alternative
    pos = nx.random_layout(graph)
    nx.draw_networkx_nodes(graph, pos)
    nx.draw_networkx_edges(graph, pos)
    nx.draw_networkx_labels(graph, pos)
    plt.savefig(file_name, bbox_inches="tight")
    # close explicitly so figures don't accumulate across repeated calls
    plt.close(fig)
# For each distinct timestamp, build a complete weighted graph over the
# vehicles present at that instant (edge weight = 100 / Euclidean distance
# between the two vehicles' global positions) and save a picture of it.
times = dataset['Time'].unique()
#data = pd.DataFrame()
#data = data.fillna(0) # with 0s rather than NaNs
dTime = pd.DataFrame()
for time in times:
    #print 'Time %i ' %time
    # rows (vehicles) observed at this timestamp
    dataTime0 = dataset.loc[dataset['Time'] == time]
    list_vIDs = dataTime0.vID.tolist()
    #print list_vIDs
    # index by vehicle id so positions can be looked up with .loc below
    dataTime = dataTime0.set_index("vID")
    #index_dataTime = dataTime.index.values
    #print dataTime
    # NOTE(review): permutations yields both (a, b) and (b, a); the distance
    # is symmetric, so every pair is computed twice.
    perm = list(permutations(list_vIDs,2))
    #print perm
    # Euclidean distance between the two vehicles of each ordered pair,
    # using the global (gX, gY) coordinates (converted to meters above)
    dist = [((((dataTime.loc[p[0],'gX'] - dataTime.loc[p[1],'gX']))**2) +
             (((dataTime.loc[p[0],'gY'] - dataTime.loc[p[1],'gY']))**2))**0.5 for p in perm]
    dataDist = pd.DataFrame(dist , index=perm, columns = {'dist'})
    # split the (from, to) tuple index into separate From and To columns
    dataDist['FromTo'] = dataDist.index
    dataDist['From'] = dataDist.FromTo.str[0]
    dataDist['To'] = dataDist.FromTo.str[1]
    # inverse distance scaled by 100 so that closer vehicles get heavier edges
    dataDist['weight'] = (1/dataDist.dist)*100
    #Delete the intermediate FromTo field
    dataDist = dataDist.drop('FromTo', 1)
    graph = nx.from_pandas_dataframe(dataDist, 'From','To',['weight'])
    save_graph(graph,'D:\\zzzLola\\PhD\\DataSet\\US101\\coding\\graphs\\%i_my_graph.png' %time)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There are a couple of points to mention about this API
Step2: Likewise, to remove a shortcut, use remove_shortcut
|
<ASSISTANT_TASK:>
Python Code:
%%javascript
IPython.keyboard_manager.command_shortcuts.add_shortcut('r', {
help : 'run cell',
help_index : 'zz',
handler : function (event) {
IPython.notebook.execute_cell();
return false;
}}
);
%%javascript
IPython.keyboard_manager.command_shortcuts.add_shortcut('r', function (event) {
IPython.notebook.execute_cell();
return false;
});
%%javascript
IPython.keyboard_manager.command_shortcuts.remove_shortcut('r');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load MNIST
Step2: Demonstrate the AutoEncoder
Step3: Fit and reconstruct using AutoEncoder
Step4: Show reconstruction examples
Step5: This looks pretty good! It is illustrative of what a vanilla auto-encoder is capable of
Step6: Data prep
Step7: Observe a VariationalAutoEncoder's generative ability
Step8: Show generation examples
Step9: Notice that the generated images are a bit blurry, generalized, and "play it safe," so-to-speak... none of the images denote the crisp character the original images do (or the decorative "tail" on the one). This is because the VAE must learn to generalize the characters.
Step10: How does SMRT stack up to SMOTE?
Step11: These synthetic examples don't seem to resemble the original dataset as well as SMRT's do. We can't be sure until we test against a classifier, however.
Step12: The following function will plot the performance of the validation set for varying balancing techniques
Step13: Now for different algorithms, let's see how we perform...
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import numpy as np
import smrt
# this is our seed
seed = 42
# show versions for continuity
print("TensorFlow version: %s" % tf.__version__)
print("NumPy version: %s" % np.__version__)
print("SMRT version: %s" % smrt.__version__)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
# train test split
from sklearn.model_selection import train_test_split
all_data = np.asarray(mnist.train.images)
X_train, X_test = train_test_split(all_data, train_size=0.7, random_state=seed)
X_train.shape
# matplotlib is way too into the whole "warn for everything"
import warnings
def suppress_warnings(func):
    """Decorator that silences all warnings raised while *func* executes.

    The wrapped callable behaves exactly like *func* but runs inside a
    ``warnings.catch_warnings`` context with every warning ignored.
    ``functools.wraps`` preserves the wrapped function's ``__name__`` and
    ``__doc__`` (the original wrapper discarded them).
    """
    from functools import wraps  # local import keeps this notebook cell self-contained

    @wraps(func)
    def wrapper(*args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return func(*args, **kwargs)
    return wrapper
@suppress_warnings
def show_mnist(array_a, array_b=None, array_c=None, nrow=2, ncol=10, figsize=None, save_loc=None):
    """Plot up to three rows of 28x28 MNIST digit images side by side.

    Parameters
    ----------
    array_a : array-like
        First row of images; each entry is a flattened 784-pixel digit,
        reshaped to 28x28 before display.
    array_b, array_c : array-like or None
        Optional second and third rows of images.
    nrow : int
        Always reassigned below based on which of array_b/array_c are
        provided; the value passed by the caller is never used.
    ncol : int
        Number of images drawn per row.
    figsize : tuple or None
        Figure size; defaults to (ncol, nrow).
    save_loc : str or None
        If given, the figure is also written to this path via plt.savefig.
    """
    # import without warnings (the surrounding decorator silences them)
    from matplotlib import pyplot as plt
    %matplotlib inline
    # if both optional rows are None, just plot the single row array_a
    if array_b is None and array_c is None:
        nrow = 1
    # if only array_c was given, shift it into the array_b slot
    elif array_b is None:
        array_b = array_c
        array_c = None
        nrow = 2
    # otherwise if just plotting the first two...
    elif array_c is None:
        nrow = 2
    elif array_b is not None and array_c is not None:
        nrow = 3
    # NOTE(review): nrow is reassigned in every branch above, so this guard
    # can never fire; the message also says "(1, 2)" while the check allows 3.
    if nrow not in (1, 2, 3):
        raise ValueError('nrow must be in (1, 2)')
    if figsize is None:
        figsize = (ncol, nrow)
    f, a = plt.subplots(nrow, ncol, figsize=figsize)
    arrays = [array_a, array_b, array_c]
    # draw one flattened digit on one subplot axis, hiding the axis frame
    def _do_show(the_figure, the_array):
        the_figure.imshow(the_array)
        the_figure.axis('off')
    for i in range(ncol):
        if nrow > 1:
            # a is a 2-D grid of axes here: one row of axes per input array
            for j in range(nrow):
                _do_show(a[j][i], np.reshape(arrays[j][i], (28, 28)))
        else:
            # with a single row, plt.subplots returns a 1-D array of axes
            _do_show(a[i], np.reshape(array_a[i], (28, 28)))
    plt.subplots_adjust(wspace=0.1, hspace=0.1)
    f.show()
    plt.draw()
    # optionally persist the figure to disk as well
    if save_loc is not None:
        plt.savefig(save_loc)
# draw some:
show_mnist(X_train)
from smrt.autoencode import AutoEncoder
from sklearn.metrics import mean_squared_error
# define the estimator. We will use L2 penalty to augment the generalizability of the encoder
ae = AutoEncoder(n_epochs=50, n_hidden=450, learning_rate=0.01, batch_size=256,
display_step=5, activation_function='sigmoid', verbose=2,
random_state=seed, clip=True, l2_penalty=1e-6,
early_stopping=True)
# fit
ae.fit(X_train)
# transform and reconstruct the test images
reconstructed = ae.feed_forward(X_test)
# get the error:
mse = ((X_test - reconstructed) ** 2).sum(axis=1).sum() / X_test.shape[0]
print("\nTest MSE: %.4f" % mse)
show_mnist(X_test, reconstructed)
# show training errors
@suppress_warnings
def plot_training_error(estimator):
    """Plot the per-epoch training cost of a fitted SMRT estimator.

    Relies on the estimator exposing an ``epoch_costs_`` attribute with
    one cost value per training epoch -- TODO confirm for other models.
    """
    # import without warnings
    from matplotlib import pyplot as plt
    %matplotlib inline
    c = estimator.epoch_costs_
    # x-axis is 1-based epoch numbers
    plt.plot(np.arange(len(c)) + 1, c)
    plt.title('Training cost over time')
    plt.ylabel('Training cost')
    plt.xlabel('Epoch')
    plt.show()
plot_training_error(ae)
# they're one-hot encoded right now. Flatten into a single vector
labels = np.asarray([np.argmax(row) for row in mnist.train.labels])
labels
from numpy.random import RandomState
import pickle
import subprocess
import os
import sys
rs = RandomState(seed)
fl = 'data/mnist/mnist.pkl'
# can we discern between a 1 and a 7? Might be tough...
min_label = 3
maj_label = 8
minority_size = 1000
majority_size = 100000
def _load_from_pickle():
    """Unpickle the cached MNIST subset and split it by class label.

    Reads the module-level ``fl`` path and splits on the module-level
    ``min_label`` / ``maj_label`` globals.  Returns
    (X, y, X_minority, y_minority, X_majority, y_majority).
    """
    with open(fl, 'rb') as handle:
        py3 = sys.version_info > (3, 0)
        # Pickles written by Python 2 need latin1 decoding under Python 3.
        print("Encoded using Python %d" % (3 if py3 else 2))
        payload = pickle.load(handle, encoding="latin1") if py3 else pickle.load(handle)
    features, targets = payload['data'], payload['labels']
    is_min = targets == min_label
    is_maj = targets == maj_label
    return (features, targets,
            features[is_min, :], targets[is_min],
            features[is_maj, :], targets[is_maj])
# if the pickle is present, load/extract from it
if os.path.exists(fl):
print('Loading from pickle')
X, y, X_min, y_min, X_maj, y_maj = _load_from_pickle()
# otherwise if the zip file is there
elif os.path.exists(fl + '.zip'):
print('Loading from zip')
# unzip first
subprocess.Popen(['unzip', fl + '.zip'])
X, y, X_min, y_min, X_maj, y_maj = _load_from_pickle()
else:
print('Building dataset')
# create masks
arng = np.arange(labels.shape[0])
min_mask = labels == min_label
maj_mask = labels == maj_label
# get labels and data
min_choices = rs.choice(arng[min_mask], minority_size)
y_min = labels[min_choices]
X_min = all_data[min_choices, :]
# get images
maj_choices = rs.choice(arng[maj_mask], majority_size)
y_maj = labels[maj_choices]
X_maj = all_data[maj_choices, :]
X = np.vstack([X_min, X_maj])
y = np.concatenate([y_min, y_maj])
# pickle it
output_data = dict(data=X, labels=y)
with open(fl, 'wb') as od:
pickle.dump(output_data, od)
# zip it
subprocess.Popen(['zip', '-r', '%s.zip' % fl, fl])
# view shapes
print('Minority size: %i' % y_min.shape)
print('Majority size: %i' % y_maj.shape)
from sklearn.model_selection import train_test_split
# split high level
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, random_state=rs, stratify=y)
# now get the X_ones, X_zeros out of the train...
min_mask = y_train == min_label
maj_mask = ~min_mask
from smrt.autoencode import VariationalAutoEncoder
# The cost here is NOT MSE - it's the sum of kullback_leibler + MSE,
# so don't panic that the cost looks so high in relation to the other autoencoder
v_encoder = VariationalAutoEncoder(n_epochs=400, n_hidden=900, n_latent_factors=15,
learning_rate=0.075, batch_size=256, display_step=25,
activation_function='sigmoid', verbose=2, l2_penalty=None,
random_state=seed, early_stopping=True, dropout=0.4,
learning_function='sgd', clip=False)
v_encoder.fit(X_train[min_mask, :])
plot_training_error(v_encoder)
# Generate fake data:
fake = v_encoder.generate_from_sample(X_train[min_mask, :][:10])
show_mnist(X_train[min_mask, :][:10], fake)
from smrt.balance import smrt_balance
# we can balance with the estimator we fit above (if we like it, otherwise SMRT can balance on its own)
X_smrt, y_smrt = smrt_balance(X_train, y_train, n_hidden=900, n_latent_factors=10, random_state=seed,
shuffle=False, balance_ratio=0.5, return_estimators=False,
prefit_estimators={min_label: v_encoder})
# Let's view some of our synthetically-generated ONEs:
show_mnist(X_smrt[-10:])
from smrt.balance import smote_balance
X_smote, y_smote = smote_balance(X_train, y_train, random_state=seed, n_neighbors=25,
shuffle=False, balance_ratio=0.5)
# Let's view some of our perturbed ONEs:
show_mnist(X_smote[-10:])
show_mnist(X_train[min_mask, :][:10], X_smrt[-10:], X_smote[-10:], save_loc='img/mnist_smrt_smote.png')
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import (precision_score, recall_score, accuracy_score,
f1_score, cohen_kappa_score, roc_auc_score)
# define different smrt args
smrt_kwargs = dict(n_epochs=350, n_hidden=900, n_latent_factors=25,
learning_rate=0.06, batch_size=256, display_step=25,
activation_function='sigmoid', verbose=2, l2_penalty=None,
random_state=seed, early_stopping=True, dropout=0.8,
learning_function='sgd', clip=False, shuffle=False,
return_estimators=True, prefit_estimators={min_label: v_encoder})
# define different smote args
smote_kwargs = dict(random_state=seed, n_neighbors=25,
shuffle=True, return_estimators=True)
# encode our labels for AUC...
le = LabelEncoder()
le.fit([min_label, maj_label])
y_test_le = le.transform(y_test)
pos_label = le.transform([min_label])[0]
# if pos label is 0, invert it...
def _invert(x):
return (~x.astype(np.bool)).astype(np.int)
if pos_label == 0:
y_test_le = _invert(y_test_le)
# function for search
def fit_pred_report(clf, ratios=(0.025, 0.05, 0.075, 0.1,
                                 0.125, 0.15, 0.175, 0.2,
                                 0.225, 0.25, 0.275, 0.3,
                                 0.325, 0.35, 0.375, 0.4,
                                 0.425, 0.45, 0.475, 0.5,)):
    """Fit *clf* on SMOTE- and SMRT-rebalanced training sets at each ratio
    in *ratios* and collect validation metrics for every combination.

    Relies on module-level state defined in earlier cells: X_train/y_train,
    X_test, y_test_le, le, pos_label, smote_kwargs and smrt_kwargs.

    Returns a DataFrame indexed by ratio with one '<balancer>-<metric>'
    column per (balancer, metric) pair.
    """
    print("--- %s ---" % clf.__class__.__name__)
    # Inner helper: fit on one rebalanced training set and score on the
    # fixed validation split, printing and returning all six metrics.
    def _fit_pred_report(name, x_tr, y_tr, ratio):
        # make sure to binarize...
        prds = clf.fit(x_tr, y_tr).predict(X_test)
        prds = le.transform(prds)
        # ROC requires binary data...
        if pos_label == 0:
            prds = _invert(prds)
        # get the scores
        pre = precision_score(y_test_le, prds)
        rec = recall_score(y_test_le, prds)
        acc = accuracy_score(y_test_le, prds)
        f1 = f1_score(y_test_le, prds)
        ck = cohen_kappa_score(y_test_le, prds)
        auc = roc_auc_score(y_test_le, prds)
        print("%s val (ratio=%.2f), accuracy=%.3f, precision=%.3f, "
              "recall=%.3f, f1=%.3f, cohen-kappa=%.3f, auc=%.3f"
              % (name, ratio, acc, pre, rec, f1, ck, auc))
        return pre, rec, acc, f1, ck, auc
    # go over each balancer
    performances = {}
    for balancer, nm, kw in ((smote_balance, "SMOTE", smote_kwargs),
                             (smrt_balance, "SMRT", smrt_kwargs)):
        # One metric list per balancer; registered in `performances` up
        # front and mutated in place by the ratio loop below.
        this_pre, this_rec, this_acc, this_f1, this_ck, this_auc = [], [], [], [], [], []
        performances['%s-precision' % nm] = this_pre
        performances['%s-recall' % nm] = this_rec
        performances['%s-accuracy' % nm] = this_acc
        performances['%s-f1' % nm] = this_f1
        performances['%s-cohen.kappa' % nm] = this_ck
        performances['%s-auc' % nm] = this_auc
        # go over ratio levels
        for ratio in ratios:
            x_tr, y_tr, estimators = balancer(X_train, y_train, balance_ratio=ratio, **kw)
            # fit the model, get the performance
            pre, rec, acc, f1, ck, auc = _fit_pred_report(nm, x_tr, y_tr, ratio)
            this_pre.append(pre)
            this_rec.append(rec)
            this_acc.append(acc)
            this_f1.append(f1)
            this_ck.append(ck)
            this_auc.append(auc)
        print()
    output = pd.DataFrame.from_dict(performances)
    output.index = ratios
    return output
# show validation scores for different balance types
@suppress_warnings
def plot_metric_curves(df, style='darkgrid', linewidth=2, markersize=4, figsize=(15, 10),
                       tgts=('precision', 'recall', 'accuracy', 'auc', 'f1', 'cohen.kappa'),
                       linestyles=('-', ':', '--', '-.'), bal_names=('SMOTE', 'SMRT')):
    """Plot validation-metric curves against balance ratio, one subplot per
    metric in *tgts* and one line per balancing technique in *bal_names*.

    *df* is the frame returned by fit_pred_report: indexed by ratio, with
    '<balancer>-<metric>' columns.
    """
    # import without warnings
    from matplotlib import pyplot as plt
    import seaborn as sns
    %matplotlib inline
    # set style
    sns.set_style(style)
    f, axes = plt.subplots(3, 2, figsize=figsize)
    x_axis = df.index.values
    # flatten the axes (from a 3x2 to a 1x6)
    axes = [axis for row in axes for axis in row]
    for i, tgt in enumerate(tgts):
        tplt = axes[i]
        mn, mx = 1., 0. # init inverted so they get updated
        # style up the metric name
        metric = ' '.join(tgt.split('.')).title()
        # NOTE(review): this inner loop reuses the name `i`, shadowing the
        # subplot index above -- harmless only because the outer `i` is not
        # read again after this point in the iteration.
        for i, nm in enumerate(bal_names):
            bal = nm.split('-')[0]
            y = df['%s-%s' % (nm, tgt)].values
            tplt.plot(x_axis, y, label=bal, marker='o',
                      linestyle=linestyles[i], linewidth=linewidth,
                      markersize=markersize)
            tplt.set_ylabel('Validation %s' % metric)
            tplt.legend(loc='lower right')
            # update min, max
            mn = min(mn, y.min())
            mx = max(mx, y.max())
        # set lim
        tplt.set_ylim((mn - .01, mx + .01))
    plt.show()
from sklearn.linear_model import LogisticRegression
# logistic regression classifier
lr_performances = fit_pred_report(LogisticRegression(random_state=rs))
plot_metric_curves(lr_performances)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: PDF is garbage, continued
Step2: Nope.
Step3: Here are all the URLs in the document...
Step4: Bleh. That is mostly links in the references, ads and navigation cruft
Step5: Much better. Now, let's see if these exist...
Step6: Looks like this will work, though we'll need to make a hand-curated list of
Step7: So far so good. Let's have a look at the url attribute...
Step8: Well, it looks like not all resources have URLs. Let's try looping over
Step9: Clearly, we need to expand the excluded URL list. And we need to match domain substrings rather than just URL prefixes
Step10: This excluded list is getting sloppy as the author slowly lapses into blacklisting entire domains wholesale
Step11: Some journals aggressively ban and throttle IPs, so this process gets slow
|
<ASSISTANT_TASK:>
Python Code:
urlre = re.compile( '(?P<url>https?://[^\s]+)' )
for page in doc :
print urlre.findall( page )
urlre = re.compile( '(?P<url>https?://[^\s]+)' )
for page in doc :
print urlre.findall( page.replace('\n','') )
from sgmllib import SGMLParser
class URLLister(SGMLParser):
    # Collects the href of every <a> tag encountered while parsing HTML.
    # NOTE(review): SGMLParser comes from the Python 2-only `sgmllib`
    # module; this class will not import on Python 3 as-is.
    def reset(self):
        # Clear parser state and start a fresh URL accumulator.
        SGMLParser.reset(self)
        self.urls = []
    def start_a(self, attrs):
        # attrs is a list of (name, value) pairs for the <a> tag.
        href = [v for k, v in attrs if k=='href']
        if href:
            self.urls.extend(href)
def get_urls_from(url):
    """Fetch *url* and return the links found on the page that start with
    'http', 'ftp' or 'www'.

    NOTE(review): uses the Python 2 `urllib.urlopen`; under Python 3 this
    would be `urllib.request.urlopen`.  The `map` call also only has its
    side effect (appending) under Python 2, where `map` is eager.
    """
    url_list = []
    import urllib
    usock = urllib.urlopen(url)
    parser = URLLister()
    parser.feed(usock.read())
    usock.close()
    parser.close()
    # Keep only absolute-looking links, dropping relative/fragment hrefs.
    map(url_list.append,
        [item for item in parser.urls if item.startswith(('http', 'ftp', 'www'))])
    return url_list
urls = get_urls_from('http://www.nature.com/ismej/journal/v10/n1/full/ismej2015100a.html')
urls
excluded = [ 'http://www.nature.com',
'http://dx.doi.org',
'http://www.ncbi.nlm.nih.gov',
'http://creativecommons.org',
'https://s100.copyright.com',
'http://mts-isme.nature.com',
'http://www.isme-microbes.org',
'http://ad.doubleclick.net',
'http://mse.force.com',
'http://links.isiglobalnet2.com',
'http://www.readcube.com',
'http://chemport.cas.org',
'http://publicationethics.org/',
'http://www.natureasia.com/'
]
def novel_url(url):
    """Return True when *url* does not start with any excluded prefix.

    Reads the module-level ``excluded`` list of URL prefixes.
    """
    # A single prefix match disqualifies the URL.
    return not any(url.startswith(prefix) for prefix in excluded)
filter( novel_url, urls )
import requests
for url in filter( novel_url, urls ) :
request = requests.get( url )
if request.status_code == 200:
print 'Good : ', url
else:
print 'Fail : ', url
from pyzotero import zotero
api_key = open( 'zotero_api_key.txt' ).read().strip()
library_id = open( 'zotero_api_userID.txt' ).read().strip()
library_type = 'group'
group_id = '405341' # microBE.net group ID
zot = zotero.Zotero(group_id, library_type, api_key)
items = zot.top(limit=5)
# we've retrieved the latest five top-level items in our library
# we can print each item's item type and ID
for item in items:
#print('Item: %s | Key: %s') % (item['data']['itemType'], item['data']['key'])
print item['data']['key'], ':', item['data']['title']
for item in items:
print item['data']['key'], ':', item['data']['url']
for item in items:
paper_url = item['data']['url']
if paper_url.startswith( 'http' ) :
link_urls = get_urls_from( paper_url )
print item['data']['key']
for url in filter( novel_url, link_urls ) :
print ' ', url
excluded = [ 'nature.com',
'doi.org',
'ncbi.nlm.nih.gov',
'creativecommons.org',
'copyright.com',
'isme-microbes.org',
'doubleclick.net',
'force.com',
'isiglobalnet2.com',
'readcube.com',
'cas.org',
'publicationethics.org',
'natureasia.com',
'uq.edu.au',
'edx.org',
'facebook.com',
'instagram.com',
'youtube.com',
'flickr.com',
'twitter.com',
'go8.edu.au',
'google.com',
'vimeo.com',
'peerj.com',
'mendeley.com',
'cloudfront.net',
'webofknowledge.com',
'sciencedirect.com',
'aol.com',
'pinterest.com',
'scopus.com',
'live.com',
'exlibrisgroup.com',
'usyd.edu.au',
'academicanalytics.com',
'microbiomedigest.com',
'ask.com',
'sogou.com',
'ou.com',
'du.edu',
'ru.nl',
'freshdesk.com',
'caltech.edu',
'traackr.com',
'adobe.com',
'linkedin.com',
'feedly.com',
'google.co.uk',
'glgoo.org',
'library.wisc.edu',
'lib.fsu.edu',
'library.illinois.edu',
'exchange.ou.edu',
'lib.noaa.gov',
'innocentive.com',
'sfx.kcl.ac.uk',
'sfx.unimi.it',
'lib.utexas.edu',
'orcid.org',
]
def novel_url(url):
    """Return True when *url* contains none of the excluded domain fragments.

    Reads the module-level ``excluded`` list; matching is by substring
    (anywhere in the URL), not by prefix.
    """
    return all(fragment not in url for fragment in excluded)
for item in items:
paper_url = item['data']['url']
if paper_url.startswith( 'http' ) :
try :
link_urls = get_urls_from( paper_url )
print item['data']['key']
for url in list(set(filter( novel_url, link_urls ))) :
print ' ', url
except IOError :
print item['data']['key'], 'FAILED'
for item in items:
paper_url = item['data']['url']
if paper_url.startswith( 'http' ) :
try :
link_urls = get_urls_from( paper_url )
print item['data']['key']
for url in list(set(filter( novel_url, link_urls ))) :
request = requests.get( url )
if request.status_code == 200:
print ' Good : ', url
else:
print ' Fail : ', url
except IOError :
print item['data']['key'], 'FAILED'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 4. Prediction Intervals and Loops (19 Points + 12 EC)
Step2: 5. Normal Distribution (8 Points)
Step3: 5.3
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
#3.1
p = [0.2, 0.5, 0.8]
n = np.arange(1, 8)
for i, pi in enumerate(p):
plt.plot(n, pi * (1 - pi)**(n - 1), 'o-', label='$p={}$'.format(pi), color='C{}'.format(i))
plt.axvline(x = 1/ pi, color='C{}'.format(i))
plt.title('Problem 3.1 - Geometric')
plt.xlabel('$n$')
plt.ylabel('$P(n)$')
plt.legend()
plt.show()
#3.2
from scipy.special import comb,factorial
N = 4
p = 0.70
mu = N * p
x = np.arange(0, N+1)
plt.plot(x, comb(N, x) * p**x *(1 - p)**(N - x), 'o-', label='binomial')
plt.plot(x, np.exp(-mu) * mu**x / factorial(x), 'o-', label='Poisson')
plt.title('Problem 3.2 - Binomial vs Geometric')
plt.xlabel('$n$')
plt.ylabel('$P(n)$')
plt.legend()
plt.show()
#3.3
from scipy.special import comb,factorial
N = 25
p = 0.10
mu = N * p
x = np.arange(0, N+1)
plt.plot(x, comb(N, x) * p**x *(1 - p)**(N - x), 'o-', label='binomial')
plt.plot(x, np.exp(-mu) * mu**x / factorial(x), 'o-', label='Poisson')
plt.title('Problem 3.3 - Binomial vs Geometric')
plt.xlabel('$n$')
plt.ylabel('$P(n)$')
plt.legend()
plt.show()
#3.4
L = 1 / 4
t = np.linspace(0,7,100)
tsmall = np.linspace(0,5,100)
plt.plot(t, L * np.exp(-L * t))
plt.fill_between(tsmall, 0, L * np.exp(-L * tsmall))
plt.axvline(x=5)
plt.title('Problem 3.4 - Exponential')
plt.xlabel('$t$')
plt.ylabel('$P(t)$')
plt.show()
#4.2
# Shortest lower prediction interval [0, n] covering at least 90% of a
# Binomial(N=12, p=0.3) distribution, found by accumulating PMF mass
# upward from zero successes.
N = 12
p = 0.3
pmf = lambda k: comb(N, k) * p ** k * (1 - p) ** (N - k)
psum = 0
ni = 0
while ni <= N:
    psum += pmf(ni)
    if psum >= 0.9:
        break
    ni += 1
print('Interval is [0, {}]'.format(ni))
#4.3
# Upper prediction interval [n, N] covering at least 95% of a
# Binomial(N=20, p=0.6) distribution: accumulate PMF mass from the top
# of the support downwards until 95% is reached.
N = 20
p = 0.6
psum = 0
# Reverse the range so we count down from the top.  Start at N rather
# than N + 1 (as before): N + 1 lies outside the binomial support, where
# comb() returns 0, so the extra term was harmless but off-by-one.
for ni in range(N, -1, -1):
    psum += comb(N, ni) * p**ni * (1 - p)**(N - ni)
    if psum >= 0.95:
        break
print('Interval is [{}, N]'.format(ni))
#4.4
# Geometric distribution with success probability p = 0.02: find the
# smallest n such that P(first success within n trials) >= 0.8, i.e. an
# 80% prediction interval [1, n].  500 is just a generous upper bound.
p = 0.02
psum = 0
for ni in range(1, 500):
    psum += p * (1 - p) ** (ni - 1)
    if psum >= 0.8:
        break
print('Interval is [1, {}]'.format(ni))
#4.5
# Same interval as 4.3, re-derived with a while-loop: count down from N
# accumulating Binomial(20, 0.6) mass until at least 95% is covered.
N = 20
p = 0.6
psum = 0
# count down
ni = N
while psum < 0.95:
    psum += comb(N, ni) * p**ni * (1 - p)**(N - ni)
    ni -= 1
#add 1, since when we broke we had just subtracted 1
print('Interval is [{}, N]'.format(ni + 1))
#5.2
# P(Z <= -2) for a standard normal variable.
import scipy.stats as ss
print(ss.norm.cdf(-2))
#5.4
# P(-2 <= X <= 0) for X ~ N(0, 1.2), via manual standardization.
zlo = (-2 - 0) / 1.2
zhi = (0 - 0) / 1.2
print(ss.norm.cdf(zhi) - ss.norm.cdf(zlo))
#5.5
# P(-2 <= X <= 0) for X ~ N(2, 1.2), using loc/scale arguments directly.
print(ss.norm.cdf(0, loc=2, scale=1.2) - ss.norm.cdf(-2, loc=2, scale=1.2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: missing data
Step2: groupby + reshaping
|
<ASSISTANT_TASK:>
Python Code:
columns = pd.MultiIndex.from_tuples([
('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')
],
names=['exp', 'animal', 'hair_length']
)
df = pd.DataFrame(np.random.randn(4, 4), columns=columns)
df
df.columns
stacked = df.stack(level=['exp','animal'])
stacked
stacked.reset_index(level=[0,1],drop=True).stack().to_frame().reset_index()
columns = pd.MultiIndex.from_tuples([('A', 'cat'), ('B', 'dog'),
('B', 'cat'), ('A', 'dog')],
names=['exp', 'animal'])
index = pd.MultiIndex.from_product([('bar', 'baz', 'foo', 'qux'),
('one', 'two')],
names=['first', 'second'])
df = pd.DataFrame(np.random.randn(8, 4), index=index, columns=columns)
df
df3 = df.iloc[[0, 1, 4, 7], [1, 2]]
df3
df
df.stack()
df.stack().mean(1).unstack()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3.1 Data Dictionary
Step2: 3.2.4 Cleaning 'review' table
Step3: 3.2.5 Cleaning 'checkin' table
Step4: 3.2.6 Cleaning 'user' table
Step5: 3.2.7 Cleaning 'tip' table
Step6: 4. Model - Latent Dirichlet Allocation (LDA)
Step7: Generating topic probabilities for each review
|
<ASSISTANT_TASK:>
Python Code:
### Link to requirements.txt on github
business.head(2)
review.head(2)
review.text.head(2)
review_all = pd.read_csv('../../data/interim/original_csv/review.csv')
# Number of reviews by date
# The sharp seasonal falls are Chrismas Day and New Year's Day
# The sharp seasonal spikes are in summer, where people presumably have more free time
review.groupby('date').agg({'review_id': len}).reset_index().plot(x='date', y='review_id', figsize=(10,6))
checkin.head(2)
user.head(2)
tip.head(2)
tip.text.head(2)
import pandas as pd
import numpy as np
import seaborn as sns # For prettier plots. Seaborn takes over pandas' default plotter
import nltk
import pyLDAvis
import pyLDAvis.sklearn
from gensim import models, matutils
from collections import defaultdict
from gensim import corpora
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
pyLDAvis.enable_notebook()
%matplotlib inline
review = pd.read_csv('../../data/interim/clean_US_cities/2016_review.csv')
review = review.fillna('')
tvec = TfidfVectorizer(stop_words='english', min_df=10, max_df=0.5, max_features=100,
norm='l2',
strip_accents='unicode'
)
review_dtm_tfidf = tvec.fit_transform(review['text'])
cvec = CountVectorizer(stop_words='english', min_df=10, max_df=0.5, max_features=100,
strip_accents='unicode')
review_dtm_cvec = cvec.fit_transform(review['text'])
print review_dtm_tfidf.shape, review_dtm_cvec.shape
# Fitting LDA models on both document-term matrices.
# NOTE(review): the `n_topics` argument was renamed `n_components` in
# scikit-learn 0.19 and removed in 0.21; use `n_components` so this
# runs on modern scikit-learn.
# On cvec DTM
lda_cvec = LatentDirichletAllocation(n_components=10, random_state=42)
lda_cvec.fit(review_dtm_cvec)
# On tfidf DTM
lda_tfidf = LatentDirichletAllocation(n_components=10, random_state=42)
lda_tfidf.fit(review_dtm_tfidf)
lda_viz_10_topics_cvec = pyLDAvis.sklearn.prepare(lda_cvec, review_dtm_cvec, cvec)
lda_viz_10_topics_cvec
# topic labels
topics_labels = {
1: "customer_feelings",
2: "customer_actions",
3: "restaurant_related",
4: "compliments",
5: "las_vegas_related",
6: "hotel_related",
7: "location_related",
8: "chicken_related",
9: "superlatives",
10: "ordering_pizza"
}
vocab = {v: k for k, v in cvec.vocabulary_.iteritems()}
vocab
lda_ = models.LdaModel(
matutils.Sparse2Corpus(review_dtm_cvec, documents_columns=False),
# or use the corpus object created with the dictionary in the previous frame!
# corpus,
num_topics = 10,
passes = 1,
id2word = vocab
# or use the gensim dictionary object!
# id2word = dictionary
)
stops = stopwords.words()
# Build a document-term DataFrame from the count-vectorized reviews.
# The count vectorizer defined above is named `cvec`; the original line
# referenced an undefined name `vectorizer`, raising NameError.
# NOTE(review): `get_feature_names` was removed in scikit-learn 1.2 in
# favor of `get_feature_names_out` -- confirm the installed version.
docs = pd.DataFrame(review_dtm_cvec.toarray(), columns=cvec.get_feature_names())
docs.sum()
bow = []
for document in review_dtm_cvec.toarray():
single_document = []
for token_id, token_count in enumerate(document):
if token_count > 0:
single_document.append((token_id, token_count))
bow.append(single_document)
# remove words that appear only once
frequency = defaultdict(int)
for text in documents:
for token in text.split():
frequency[token] += 1
texts = [[token for token in text.split() if frequency[token] > 1 and token not in stops]
for text in documents]
# Create gensim dictionary object
dictionary = corpora.Dictionary(texts)
# Create corpus matrix
corpus = [dictionary.doc2bow(text) for text in texts]
lda_.print_topics(num_topics=3, num_words=5)
lda_.get_document_topics(bow[0])
doc_topics = [lda_.get_document_topics(doc) for doc in corpus]
topic_data = []
for document_id, topics in enumerate(doc_topics):
document_topics = []
for topic, probability in topics:
topic_data.append({
'document_id': document_id,
'topic_id': topic,
'topic': topics_labels[topic],
'probability': probability
})
# The original line had a syntax error (`topic_data.[:5]`); slice the
# list before building the frame.  NOTE(review): only the first five
# topic records are kept, mirroring the apparent intent -- confirm
# whether the full `topic_data` list was meant instead.
topics_df = pd.DataFrame(topic_data[:5])
topics_df.pivot_table(values="probability", index=["document_id", "topic"]).T
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Single user review
Step2: 1.2 Remove the columns that you do not require
Step3: 1.3 Remove location records with poor accuracy
Step4: It looks like the data set contains quite accurate location measurements, as a visual inspection of the histogram suggests that almost 90% of the observations have relatively good accuracy. It is therefore safe to select only the most accurate observations.
Step5: Next, determine how many observations to keep. The impact of using an accuracy value of 40 is demonstrated in the cell below.
Step6: 73% of the records meet your criteria, and will be used as a filter in subsequent steps.
Step7: Drop the accuracy column from the DataFrame, as it is no longer required.
Step8: Note
Step9: Having two DataFrames with time as an index, you can simply "join" them on the index columns by assigning the value “None” to the argument “on” as demonstrated below.
Step10: It is time to account for possible noise, and remove the routers with sparse data (i.e., less than five observations, as in the referenced paper). Pandas "df.groupby()" will be used to do this.
Step11: 1.4.2 Compute the median location of each AP
Step12: After completing the above, you will have your geomedians, and will be ready to move on to the last step, which is to filter out the non-stationary access points.
Step13: 1.4.3 Filter out the non-stationary routers
Step14: Now, check how many of the routers pass the threshold. Iterate over the access points, and count the ratio of measurements outside the threshold to all measurements. They are assigned to "static" or "others" based on your confidence level.
Step15: The tagged routers (access points) can now be visualized on a map.
Step16: Note
Step17: You can now compare this to your computed values.
Step18: The results are acceptable. You can compute the actual distance between the points with the "haversine" function.
Step19: 2. Review of all users
Step20: 2.2 Drop APs with sparse records
Step21: 2.3 Compute medians
Step22: 2.4 Compute distances of observations to the calculated median
Step23: 2.5 Label APs as static or non-static
Step24: 2.6 Plot the static APs
Step25: <br>
|
<ASSISTANT_TASK:>
Python Code:
# Load relevant libraries.
from os import path
import pandas as pd
import numpy as np
import folium
import glob
from tqdm import tqdm
import random
%matplotlib inline
# Load custom modules.
import sys
sys.path.append('..')
from utils import getmedian, haversine
from utils import llaToECEF as coords_to_geomedian
from utils import ECEFTolla as geomedian_to_coords
from IPython.display import Image
# Define variable definitions.
wifi_path = '../data/dartmouth/wifi'
location_path = '../data/dartmouth/location/'
# Load WiFi data.
u00_wifi = pd.read_csv(path.join(wifi_path, 'wifi_u00.csv'))
u00_wifi.head(3)
# Load location data.
u00_loc = pd.read_csv(path.join(location_path, 'gps_u00.csv'))
u00_loc.head(3)
# Remove columns from WiFi dataset.
u00_wifi.drop(['freq', 'level'], axis=1, inplace=True)
u00_wifi.head(3)
# Remove irrelevant columns from location dataset.
u00_loc.drop(['provider', 'network_type', 'bearing', 'speed', 'travelstate'], axis=1, inplace=True)
u00_loc.head(3)
# Plot histogram of accuracy observations.
u00_loc.accuracy.hist(cumulative=True, density=1, histtype='step', bins=100)
# Review the dataset with Pandas decribe function.
u00_loc.accuracy.describe()
# Determine the number of records meeting our threshold of 40 for accuracy.
result = len(u00_loc[u00_loc.accuracy <= 40]) / float(len(u00_loc))
print('Proportion of records that meet the criteria is {:.1f}%'.format(100*result))
# Make a copy of the original dataset before applying the filter.
u00_loc_raw = u00_loc.copy()
# Apply the filter.
u00_loc = u00_loc[u00_loc['accuracy'] <= 40]
# Get the lenghts of each of the data objects.
original_location_count = len(u00_loc_raw)
filtered_location_count = len(u00_loc)
print("Number of location observations before filtering: {}".format(original_location_count))
print("Number of observations remaining after filtering: {}".format(filtered_location_count))
# Update the object to remove accuracy.
u00_loc.drop('accuracy', axis=1, inplace=True)
# Display the head of the new dataset.
u00_loc.head(3)
# Set the index for WiFi.
u00_wifi = u00_wifi.set_index('time')
u00_wifi.head(3)
# Set the index for location.
u00_loc = u00_loc.set_index('time')
u00_loc.head(3)
# Join the two data sets, print the number of records found and display the head of the new dataset.
u00_raw_geotags = u00_wifi.join(u00_loc, how='inner',on=None)
print('{} WiFi records found time matching location records.'.format(len(u00_raw_geotags)))
u00_raw_geotags.head(3)
# Create object u00_groups.
u00_groups = u00_raw_geotags.groupby('BSSID')
# Create a new object where filter criteria is met.
u00_geotags = u00_groups.filter(lambda gr: len(gr)>=5)
print("{} geotagged records remained after trimming for sparse data.".format(len(u00_geotags)))
print("They correspond to {} unique router APs".format(len(u00_groups)))
# Create a new DataFrame with latitude and longitude.
u00_geo_medians = pd.DataFrame(columns=[u'latitude', u'longitude'])
# Transform the data set using the provided set of utilities.
for (BSSID, geotags) in u00_groups:
geotags = [row for row in np.array(geotags[['latitude', 'longitude', 'altitude']])]
geotags = [coords_to_geomedian(row) for row in geotags]
median = getmedian(geotags)
median = geomedian_to_coords(median)[:2]
u00_geo_medians.loc[BSSID] = median
# Display the head of the geomedians object.
u00_geo_medians.head(3)
# Calculate the distances from the median.
u00_distances = {}
for BSSID, geotags in u00_groups:
u00_distances[BSSID] = []
(lat_median, lon_median) = u00_geo_medians.loc[BSSID]
for (lat, lon) in np.array(geotags[['latitude','longitude']]):
u00_distances[BSSID].append(haversine(lon, lat, lon_median, lat_median)*1000) # haversine() returns distance in [km]
# Group access points as static or non-static.
# Set the thresholds.
# A router is labeled "static" when at least `confidence_level` of its
# observations fall within `distance_threshold` meters of its geomedian.
distance_threshold = 200
confidence_level = 0.95
# Create empty lists.
static = []
others = []
for BSSID, distances in u00_distances.items():
    all_count = len(distances)
    # Count observations within the distance threshold of the geomedian.
    near_count = len(list(filter(lambda distance: distance <= distance_threshold, distances)))
    if( near_count / all_count >= confidence_level ):
        static.append(BSSID)
    else:
        others.append(BSSID)
# Print summary results.
print("We identified {} static routers and {} non-static (moved or mobile).".format(len(static), len(others)))
# Plot the access points on a map.
map_center = list(u00_geo_medians.median())
routers_map = folium.Map(location=map_center, zoom_start=14)
# Add points to the map for each of the locations.
for router in static:
folium.CircleMarker(u00_geo_medians.loc[router], fill_color='red', radius=15, fill_opacity=0.5).add_to(routers_map)
#Display the map.
routers_map
# Set the provided location.
lat = 43.7068263
lon = -72.2868704
bssid1 = '00:01:36:57:be:88'
bssid2 = '00:01:36:57:be:87'
u00_geo_medians.loc[[bssid1, bssid2]]
# Calculate and display the difference between calculated and Google API provided locations.
lat_m1, lon_m1 = u00_geo_medians.loc[bssid1]
lat_m2, lon_m2 = u00_geo_medians.loc[bssid2]
print('Distance from the Google API provided location to our first router ' \
'estimation is {:2g}m'.format(haversine(lon,lat,lon_m1,lat_m1)*1000))
print('Distance from the Google API provided location to our first router ' \
'estimation is {:2g}m'.format(haversine(lon,lat,lon_m2,lat_m2)*1000))
# Set variables.
all_geotags = pd.DataFrame(columns=['time','BSSID','latitude','longitude','altitude'])
all_geotags = all_geotags.set_index('time')
pcounter = 0
# Define function to build the dataset, all_geotags, using the input files supplied.
def build_ds(file_in, all_geotags):
    """Assemble geotagged WiFi readings for one user's log file.

    Joins the user's WiFi scan log with their accuracy-filtered GPS
    fixes on the shared ``time`` index and returns the joined frame
    wrapped in a single-element list (the caller appends it).

    NOTE(review): ``all_geotags`` is accepted for call-site compatibility
    but is not used inside this function.
    """
    # Assumes filenames look like 'wifi_<user_id>.csv' -- the slice strips
    # the 5-char 'wifi_' prefix and the '.csv' suffix (TODO confirm).
    user_id = path.basename(file_in)[5:-4]
    # Load this user's WiFi scans and the matching GPS fixes.
    wifi = pd.read_csv(file_in)
    loc = pd.read_csv(path.join(location_path, 'gps_' + user_id + '.csv'))
    # Keep only GPS fixes accurate to within 40 m.
    loc = loc[loc.accuracy <= 40]
    # Discard the columns not needed downstream.
    wifi = wifi.drop(['freq', 'level'], axis=1)
    loc = loc.drop(['accuracy', 'provider', 'network_type', 'bearing',
                    'speed', 'travelstate'], axis=1)
    # Align both frames on their timestamps and inner-join them.
    raw_tags = wifi.set_index('time').join(loc.set_index('time'), how='inner')
    return [raw_tags]
# Iterate through the files in the specified directory and append the results of the function to the all_geotags variable.
for f in tqdm(glob.glob(wifi_path + '/*.csv')):
# Append result from our function to all_geotags for each input file supplied.
all_geotags = all_geotags.append(build_ds(f, all_geotags))
print("{} all geotags found".format(len(all_geotags)))
all_groups = all_geotags.groupby('BSSID')
print("{} unique routers found".format(len(all_groups)))
# Drop sparsely populated access points.
all_geotags = all_groups.filter(lambda gr: len(gr)>=5)
all_groups = all_geotags.groupby('BSSID')
print("{} unique router APs remaining after dropping routers with sparse data".format(len(all_groups)))
# Create a new variable containing all the coordinates.
all_geo_medians = pd.DataFrame(columns=[u'latitude', u'longitude'])
# Compute the geomedians and add to all_geo_medians.
# Initiate progress bar.
with tqdm(total=len(all_groups)) as pbar:
# Iterate through data in all_groups as per single user example.
for i, data in enumerate(all_groups):
(BSSID, geotags) = data
geotags = [row for row in np.array(geotags[['latitude', 'longitude', 'altitude']])]
geotags = [coords_to_geomedian(row) for row in geotags]
median = getmedian(geotags)
median = geomedian_to_coords(median)[:2]
all_geo_medians.loc[BSSID] = median
pbar.update()
pbar.close()
# Calculate the distances from the median.
all_distances = {}
# Initiate progress bar.
with tqdm(total=len(all_groups)) as pbar:
# Iterate through data in all_groups as per single user example.
for i, data in enumerate(all_groups):
(BSSID, geotags) = data
all_distances[BSSID] = []
(lat_median, lon_median) = all_geo_medians.loc[BSSID]
for (lat, lon) in np.array(geotags[['latitude','longitude']]):
all_distances[BSSID].append(haversine(lon, lat, lon_median, lat_median)*1000)
pbar.update()
pbar.close()
# Group access points as static or non-static.
# Set the thresholds.
distance_threshold = 200
confidence_level = 0.95
# Create empty lists.
all_static = []
all_others = []
for BSSID, distances in all_distances.items():
all_count = len(distances)
near_count = len(list(filter(lambda distance: distance <= distance_threshold, distances)))
if( near_count / all_count >= confidence_level ):
all_static.append(BSSID)
else:
all_others.append(BSSID)
# Print summary results.
print("We identified {} static routers and {} non-static (moved or mobile).".format(len(all_static), len(all_others)))
# Plot the access points on a map.
all_map_center = list(all_geo_medians.median())
all_routers_map = folium.Map(location=all_map_center, zoom_start=10)
# Add 1000 randomly sampled points to a new variable.
random.seed(3)
rn_static = random.sample(all_static,1000)
# Add the points in rn_static to the map. A random seed value is used for reproducibility of results.
for router in rn_static:
folium.CircleMarker(all_geo_medians.loc[router], fill_color='red', radius=15, fill_opacity=0.5).add_to(all_routers_map)
# Display the map.
all_routers_map
# Your answer here.
# Please add as many cells as you require in this section.
# Your plot here.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the zip file from the web and save it to your hard drive.
Step2: Show contents of the zip file.
Step3: Read csv-formatted data directly from the zip file into pandas DataFrame. Also rename some columns for prettier output.
Step4: Show unique values of the Topic column.
Step5: Leave only those rows that have Expenditures in the column Topic. Next, leave only those that contain PPP in the Indicator Name column values. Finally, create a dictionary with a pair of variable key and its meaningful name.
Step6: Do the same for Attainment among Topic values and slightly more involved subset of Indicator Name. Here we require that it contains both strings, with primary schooling and 15.
Step7: Now show all column names in the primary data set.
Step8: Combine two dictionaries into one.
Step9: Subset the data to include only three interesting columns that we have found above and only for the year 2010.
Step10: Export data to Excel.
Step11: Now suppose we already have the data saved in the Excel file. Let's read it from scratch into pandas DataFrame.
Step12: Let's see how percentage of educated population depends on government expenditures on primary students. Also, save the picture to the pdf file.
Step13: To be more precise we can quantify the effect of expenditures on schooling via simple OLS regression.
Step14: And save the key result to the LaTeX table.
|
<ASSISTANT_TASK:>
Python Code:
import re
import requests
import zipfile
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import seaborn as sns
import statsmodels.formula.api as sm
sns.set_context('talk')
pd.set_option('float_format', '{:6.2f}'.format)
%matplotlib inline
url = 'http://databank.worldbank.org/data/download/Edstats_csv.zip'
path = '../data/WorldBank/Edstats_csv.zip'
response = requests.get(url)
with open(path, "wb") as file:
file.write(response.content)
zf = zipfile.ZipFile(path)
files = zf.namelist()
print(files)
data = pd.read_csv(zf.open(files[0]))
series = pd.read_csv(zf.open(files[2]))
series.rename(columns={series.columns[0]: 'Series Code'}, inplace=True)
data.rename(columns={data.columns[0]: 'Country Name'}, inplace=True)
print(series.columns)
print(series['Topic'].unique())
subset = series.query("Topic == 'Expenditures'")[['Series Code', 'Indicator Name']]
subset = subset[subset['Indicator Name'].str.contains('PPP')]
print(subset.values)
xvar = {'UIS.XUNIT.PPP.1.FSGOV': 'Expenditure per student'}
subset = series.query("Topic == 'Attainment'")[['Series Code', 'Indicator Name']]
subset = subset[subset['Indicator Name'].str.contains('(?=.*with primary schooling)(?=.*15)')]
print(subset.values)
yvar = {'BAR.PRM.CMPT.15UP.ZS': 'Pct with schooling'}
print(data.columns)
renames = xvar.copy()
renames.update(yvar)
print(renames)
cols = ['Country Name', 'Indicator Code', '2010']
data_sub = data.ix[data['Indicator Code'].isin(renames.keys()), cols].dropna()
data_sub.replace({'Indicator Code': renames}, inplace=True)
data_sub.set_index(cols[:2], inplace=True)
data_sub = data_sub[cols[-1]].unstack(cols[1]).dropna()
data_sub.columns.name = 'Indicator'
data_sub.index.name = 'Country'
print(data_sub.head())
data_sub.to_excel('../data/WorldBank/education.xlsx', sheet_name='data')
education = pd.read_excel('../data/WorldBank/education.xlsx', sheet_name='data', index_col=0)
print(education.head())
education['Expenditure per student (log)'] = np.log(education['Expenditure per student'])
fig = plt.figure(figsize=(8, 6))
sns.regplot(x='Expenditure per student (log)', y='Pct with schooling',
data=education, ax=fig.gca())
plt.savefig('../plots/education.pdf')
plt.show()
formula = 'Q("Pct with schooling") ~ np.log(Q("Expenditure per student"))'
result = sm.ols(formula=formula, data=education).fit()
print(result.summary())
out = pd.DataFrame({'Parameter': result.params, 't-stat': result.tvalues})
out.to_latex('../tables/education_ols.tex')
print(out)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First let's choose the basic properties required for the MERCURIUS integrator to run correctly. In particular, we are
Step2: Now that the preliminary setup is complete, it's time to add some particles to the system! When using the MERCURIUS integrator it is important to add active bodies first and semi-active bodies later. The sim.N_active variable distinguishes massive bodies from semi-active/test bodies.
Step3: Now, let's create some comets! For this simple example we are assuming that all comets have the same mass and radius.
Step4: We need to move to the COM frame to avoid drifting out of our simulation box. Also, it is always good practice to monitor the change in energy over the course of a simulation, which requires us to calculate it before and after the simulation.
Step5: We can visualize our setup using rebound.OrbitPlot
Step6: Alternatively, we can also use the WebGL Widget to get an interactive visualization of the simulation.
Step7: Finally, let's simulate our system for and check that our final relative energy error is small. The energy error is a key measure of whether the integration was performed accurately or not.
|
<ASSISTANT_TASK:>
Python Code:
import rebound
import numpy as np
sim = rebound.Simulation()
np.random.seed(42)
#integrator options
sim.integrator = "mercurius"
sim.dt = 1
sim.testparticle_type = 1
#collision and boundary options
sim.collision = "direct"
sim.collision_resolve = "merge"
sim.collision_resolve_keep_sorted = 1
sim.boundary = "open"
boxsize = 200.
sim.configure_box(boxsize)
sim.track_energy_offset = 1
#simulation time
tmax = 1e4
#massive bodies
sim.add(m=1., r=0.005) # Sun
a_neptune = 30.05
sim.add(m=5e-5,r=2e-4,a=a_neptune,e=0.01) # Neptune
sim.N_active = sim.N
# semi-active bodies
n_comets = 100
a = np.random.random(n_comets)*10 + a_neptune
e = np.random.random(n_comets)*0.009 + 0.99
inc = np.random.random(n_comets)*np.pi/2.
m = 1e-10
r = 1e-7
for i in xrange(0,n_comets):
rand = np.random.random()*2*np.pi
sim.add(m=m, r=r, a=a[i], e=e[i], inc=inc[i], Omega=0, omega=rand, f=rand)
sim.move_to_com()
E0 = sim.calculate_energy()
%matplotlib inline
fig = rebound.OrbitPlot(sim,Narc=300)
sim.getWidget(size=(500,300),scale=1.8*a_neptune)
sim.integrate(tmax)
dE = abs((sim.calculate_energy() - E0)/E0)
print(dE)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Regiones de estabilidad más comunes para estudiar
Step2: Quiz 3
Step3: 2) Aplique el método de Forward Euler para resolver el IVP hasta el tiempo $t = 1.2$ para $h_1 = 0.1$ y $h_2 = 0.4$. Considere $\lambda = -5$. Grafique sus resultados versus la curva analítica $y(t)$ utilizando marcadores únicos por cada experimento (por ejemplo use $\bullet$ para $h_1$ y $\triangle$ para $h_2$). ¿Cómo explica el comportamiento observado?
Step4: 3) El método de Backward Euler hace uso del extremo derecho del intervalo de estimación de $y'(t)$, lo que genera una forma implícita para estimar $y_{i+1}$ bajo la siguiente fórmula
Step5: 5) Resuelva mediante algún método (FE, BE, RK4, etc.) el siguiente IVP
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Plot of many regions!
l1, l2 = -3.5, 3.5
resolution = 0.01
[X, Y] = np.meshgrid(np.arange(-l2,l2,resolution), np.arange(-l2,l2,resolution))
Z = X + 1j*Y
def plot_complex_region(R, ax, title, cmap=plt.cm.gray, levels=np.linspace(0, 1, 20)):
    """Draw |R(z)| as filled contours on *ax*, with hand-drawn complex axes.

    NOTE(review): relies on the module-level grid ``X``, ``Y`` and the
    axis extents ``l1``, ``l2`` defined above.
    """
    ax.set_title(title, fontsize=16)
    ax.axis("equal")
    # Draw the real/imaginary axes as arrows instead of relying on the spines.
    ax.arrow(0, l1, 0, 2*l2-0.15, head_width=0.10, head_length=0.10, fc='k', ec='k', alpha=1.0, lw=0.5)
    ax.arrow(l1, 0, 2*l2-0.15, 0, head_width=0.10, head_length=0.10, fc='k', ec='k', alpha=1.0, lw=0.5)
    ax.contourf(X, Y, R, cmap=cmap, levels=levels, origin='lower', antialiased=True)
    ax.grid(True)
    ax.set_xlabel(r"$\Re(z)$", fontsize=20)
    ax.set_ylabel(r"$\Im(z)$", fontsize=20)
    # Hide the default axes box so only the arrow axes remain visible.
    for side in ("bottom", "top", "left", "right"):
        ax.spines[side].set_visible(False)
regions = []
# Euler stability region
R = 1 + Z
regions.append(np.abs(R))
# Backward Euler stability region
R = 1/(1-Z)
regions.append(np.abs(R))
# RK2 stability region
R = 1 + Z + Z**2/2
regions.append(np.abs(R))
# RK4 stability region
R = 1 + Z + Z**2/2 + Z**3/6 + Z**4/24
regions.append(np.abs(R))
f, axarr = plt.subplots(2, 2, figsize=(14,14))
plot_complex_region(regions[0], axarr[0,0], "Forward Euler", plt.cm.Reds)
plot_complex_region(regions[1], axarr[0,1], "Backward Euler", plt.cm.Blues)
plot_complex_region(regions[2], axarr[1,0], "Runge-Kutta 2", plt.cm.Greens)
plot_complex_region(regions[3], axarr[1,1], "Runge-Kutta 4", plt.cm.Oranges)
plt.savefig("stability_regions.pdf")
plt.show()
levels = np.linspace(1, 10, 50)
f, axarr = plt.subplots(1, 1, figsize=(8,8))
plot_complex_region(regions[0], axarr, "Forward Euler with $\Re(\lambda > 0)$", plt.cm.Reds, levels=levels)
# Forward Euler Method
def euler_ode(y, t, f, h):
    """One explicit (forward) Euler step: y_{i+1} = y_i + h * f(t_i, y_i)."""
    slope = f(t, y)
    return y + h * slope
#lambda
l = -5
# stability range
hmin = 1e-16
hmax = -2.0/l
print("Stability region for lambda = {}: h in [{},{}]".format(l, hmin, hmax))
# Right hand side of ODE, f(t,y)
def f(t, y):
    # ODE right-hand side of the linear test equation y'(t) = lambda*y(t);
    # `l` is the module-level lambda (set to -5 above).
    return l*y
#Analytical solution
def y(t):
    # Closed-form solution e^(lambda*t) of y' = l*y with y(0) = 1.
    return np.exp(l*t)
# Global parameters
T = 1.3
tt = np.linspace(0, T, 100)
plt.figure(figsize=(15,5))
# Using hmax
h = hmax
t_times = np.arange(0, T, h)
y_output = np.zeros(t_times.size)
y_output[0] = 1
for i in range(1,t_times.size):
y_output[i] = euler_ode(y_output[i-1], t_times[i-1], f, h)
plt.plot(t_times, y_output, 'b^-', lw=2.0, label=r"$h_2="+str(h)+"$", markersize=10)
# Using h inside region
h = 0.1
t_times = np.arange(0, T, h)
y_output = np.zeros(t_times.size)
y_output[0] = 1
for i in range(1,t_times.size):
y_output[i] = euler_ode(y_output[i-1], t_times[i-1], f, h)
plt.plot(t_times, y_output, 'go-', lw=2.0, label=r"$h_1="+str(h)+"$")
# Analytical solution
yy = y(tt)
plt.plot(tt, yy, 'r-', lw=2.0, label=r"$e^{\lambda t}$")
# a little rect for axis
plt.xlim(0, T)
plt.ylim(-1.1,1.1)
plt.grid(True)
plt.xticks(np.arange(0,T,h))
plt.xlabel(r"$t$", fontsize=20)
plt.ylabel(r"$y(t)$", fontsize=20)
plt.legend(loc='best', fontsize=16)
#plt.savefig("linear.pdf")
plt.show()
R = 1-Z
Rhat = np.abs(1-Z)
f, axarr = plt.subplots(1, 1, figsize=(8,8))
plot_complex_region(Rhat, axarr, "Backward Euler with $\Re(\lambda > 0)$", plt.cm.Blues)
xx = np.linspace(-5, 5, 100)
plt.figure(figsize=(7,7))
#plt.axis("off")
plt.grid(True)
plt.xlim(-6,6)
plt.ylim(-6,6)
plt.xticks(np.arange(-5,6,1))
plt.yticks(np.arange(-6,6,1))
plt.arrow(0, -6, 0, 11, head_width=0.25, head_length=0.25, fc='k', ec='k')
plt.arrow(-6, 0, 11, 0, head_width=0.25, head_length=0.25, fc='k', ec='k')
plt.text(5.5, 0, r"$\Re(z)$", fontsize=20)
plt.text(0, 5.5, r"$\Im(z)$", fontsize=20)
plt.gca().spines["bottom"].set_visible(False)
plt.gca().spines["top"].set_visible(False)
plt.gca().spines["left"].set_visible(False)
plt.gca().spines["right"].set_visible(False)
plt.savefig("plane.pdf")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: An M-estimator minimizes the function
Step2: Andrew's Wave
Step3: Hampel's 17A
Step4: Huber's t
Step5: Least Squares
Step6: Ramsay's Ea
Step7: Trimmed Mean
Step8: Tukey's Biweight
Step9: Scale Estimators
Step10: The mean is not a robust estimator of location
Step11: The median, on the other hand, is a robust estimator with a breakdown point of 50%
Step12: Analogously for the scale
Step13: Median Absolute Deviation
Step14: Another robust estimator of scale is the Interquartile Range (IQR)
Step15: The IQR is less robust than the MAD in the sense that it has a lower breakdown point
Step16: Duncan's Occupational Prestige data - M-estimation for outliers
Step17: Hertzprung Russell data for Star Cluster CYG 0B1 - Leverage Points
Step18: Why? Because M-estimators are not robust to leverage points.
Step19: Let's delete that line
Step20: MM estimators are good for this type of problem, unfortunately, we do not yet have these yet.
Step21: Note
Step22: Exercise
Step23: Squared error loss
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from statsmodels.compat import lmap
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
norms = sm.robust.norms
def plot_weights(support, weights_func, xlabels, xticks):
    """Plot a robust norm's weight function over *support* and return the axes."""
    fig, axes = plt.subplots(figsize=(12, 8))
    axes.plot(support, weights_func(support))
    # Label the x axis at the points that characterise the norm (e.g. +/- c).
    axes.set_xticks(xticks)
    axes.set_xticklabels(xlabels, fontsize=16)
    # Weights live in [0, 1]; pad slightly so the curve is not clipped.
    axes.set_ylim(-.1, 1.1)
    return axes
help(norms.AndrewWave.weights)
a = 1.339
support = np.linspace(-np.pi*a, np.pi*a, 100)
andrew = norms.AndrewWave(a=a)
plot_weights(support, andrew.weights, ['$-\pi*a$', '0', '$\pi*a$'], [-np.pi*a, 0, np.pi*a]);
help(norms.Hampel.weights)
c = 8
support = np.linspace(-3*c, 3*c, 1000)
hampel = norms.Hampel(a=2., b=4., c=c)
plot_weights(support, hampel.weights, ['3*c', '0', '3*c'], [-3*c, 0, 3*c]);
help(norms.HuberT.weights)
t = 1.345
support = np.linspace(-3*t, 3*t, 1000)
huber = norms.HuberT(t=t)
plot_weights(support, huber.weights, ['-3*t', '0', '3*t'], [-3*t, 0, 3*t]);
help(norms.LeastSquares.weights)
support = np.linspace(-3, 3, 1000)
lst_sq = norms.LeastSquares()
plot_weights(support, lst_sq.weights, ['-3', '0', '3'], [-3, 0, 3]);
help(norms.RamsayE.weights)
a = .3
support = np.linspace(-3*a, 3*a, 1000)
ramsay = norms.RamsayE(a=a)
plot_weights(support, ramsay.weights, ['-3*a', '0', '3*a'], [-3*a, 0, 3*a]);
help(norms.TrimmedMean.weights)
c = 2
support = np.linspace(-3*c, 3*c, 1000)
trimmed = norms.TrimmedMean(c=c)
plot_weights(support, trimmed.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
help(norms.TukeyBiweight.weights)
c = 4.685
support = np.linspace(-3*c, 3*c, 1000)
tukey = norms.TukeyBiweight(c=c)
plot_weights(support, tukey.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
x = np.array([1, 2, 3, 4, 500])
x.mean()
np.median(x)
x.std()
stats.norm.ppf(.75)
print(x)
sm.robust.scale.mad(x)
np.array([1,2,3,4,5.]).std()
sm.robust.scale.iqr(x)
np.random.seed(12345)
fat_tails = stats.t(6).rvs(40)
kde = sm.nonparametric.KDEUnivariate(fat_tails)
kde.fit()
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(kde.support, kde.density);
print(fat_tails.mean(), fat_tails.std())
print(stats.norm.fit(fat_tails))
print(stats.t.fit(fat_tails, f0=6))
huber = sm.robust.scale.Huber()
loc, scale = huber(fat_tails)
print(loc, scale)
sm.robust.mad(fat_tails)
sm.robust.mad(fat_tails, c=stats.t(6).ppf(.75))
sm.robust.scale.mad(fat_tails)
from statsmodels.graphics.api import abline_plot
from statsmodels.formula.api import ols, rlm
prestige = sm.datasets.get_rdataset("Duncan", "carData", cache=True).data
print(prestige.head(10))
fig = plt.figure(figsize=(12,12))
ax1 = fig.add_subplot(211, xlabel='Income', ylabel='Prestige')
ax1.scatter(prestige.income, prestige.prestige)
xy_outlier = prestige.loc['minister', ['income','prestige']]
ax1.annotate('Minister', xy_outlier, xy_outlier+1, fontsize=16)
ax2 = fig.add_subplot(212, xlabel='Education',
ylabel='Prestige')
ax2.scatter(prestige.education, prestige.prestige);
ols_model = ols('prestige ~ income + education', prestige).fit()
print(ols_model.summary())
infl = ols_model.get_influence()
student = infl.summary_frame()['student_resid']
print(student)
print(student.loc[np.abs(student) > 2])
print(infl.summary_frame().loc['minister'])
sidak = ols_model.outlier_test('sidak')
sidak.sort_values('unadj_p', inplace=True)
print(sidak)
fdr = ols_model.outlier_test('fdr_bh')
fdr.sort_values('unadj_p', inplace=True)
print(fdr)
rlm_model = rlm('prestige ~ income + education', prestige).fit()
print(rlm_model.summary())
print(rlm_model.weights)
dta = sm.datasets.get_rdataset("starsCYG", "robustbase", cache=True).data
from matplotlib.patches import Ellipse
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111, xlabel='log(Temp)', ylabel='log(Light)', title='Hertzsprung-Russell Diagram of Star Cluster CYG OB1')
ax.scatter(*dta.values.T)
# highlight outliers
e = Ellipse((3.5, 6), .2, 1, alpha=.25, color='r')
ax.add_patch(e);
ax.annotate('Red giants', xy=(3.6, 6), xytext=(3.8, 6),
arrowprops=dict(facecolor='black', shrink=0.05, width=2),
horizontalalignment='left', verticalalignment='bottom',
clip_on=True, # clip to the axes bounding box
fontsize=16,
)
# annotate these with their index
for i,row in dta.loc[dta['log.Te'] < 3.8].iterrows():
ax.annotate(i, row, row + .01, fontsize=14)
xlim, ylim = ax.get_xlim(), ax.get_ylim()
from IPython.display import Image
Image(filename='star_diagram.png')
y = dta['log.light']
X = sm.add_constant(dta['log.Te'], prepend=True)
ols_model = sm.OLS(y, X).fit()
abline_plot(model_results=ols_model, ax=ax)
rlm_mod = sm.RLM(y, X, sm.robust.norms.TrimmedMean(.5)).fit()
abline_plot(model_results=rlm_mod, ax=ax, color='red')
infl = ols_model.get_influence()
h_bar = 2*(ols_model.df_model + 1 )/ols_model.nobs
hat_diag = infl.summary_frame()['hat_diag']
hat_diag.loc[hat_diag > h_bar]
sidak2 = ols_model.outlier_test('sidak')
sidak2.sort_values('unadj_p', inplace=True)
print(sidak2)
fdr2 = ols_model.outlier_test('fdr_bh')
fdr2.sort_values('unadj_p', inplace=True)
print(fdr2)
l = ax.lines[-1]
l.remove()
del l
weights = np.ones(len(X))
weights[X[X['log.Te'] < 3.8].index.values - 1] = 0
wls_model = sm.WLS(y, X, weights=weights).fit()
abline_plot(model_results=wls_model, ax=ax, color='green')
yy = y.values[:,None]
xx = X['log.Te'].values[:,None]
params = [-4.969387980288108, 2.2531613477892365] # Computed using R
print(params[0], params[1])
abline_plot(intercept=params[0], slope=params[1], ax=ax, color='red')
np.random.seed(12345)
nobs = 200
beta_true = np.array([3, 1, 2.5, 3, -4])
X = np.random.uniform(-20,20, size=(nobs, len(beta_true)-1))
# stack a constant in front
X = sm.add_constant(X, prepend=True) # np.c_[np.ones(nobs), X]
mc_iter = 500
contaminate = .25 # percentage of response variables to contaminate
all_betas = []
for i in range(mc_iter):
y = np.dot(X, beta_true) + np.random.normal(size=200)
random_idx = np.random.randint(0, nobs, size=int(contaminate * nobs))
y[random_idx] = np.random.uniform(-750, 750)
beta_hat = sm.RLM(y, X).fit().params
all_betas.append(beta_hat)
all_betas = np.asarray(all_betas)
se_loss = lambda x : np.linalg.norm(x, ord=2)**2
se_beta = lmap(se_loss, all_betas - beta_true)
np.array(se_beta).mean()
all_betas.mean(0)
beta_true
se_loss(all_betas.mean(0) - beta_true)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: loading different datasets
Step2: I - Clustering Nodes
Step3: 1 - Parameters Optimization
Step4: Difficult to find an elbow criterion
Step5: -> Weird
Step6: B - Mini batch
Step7: <hr>
Step8: <hr>
Step9: <hr>
Step10: <hr>
Step11: IV - Tag transactions
Step12: <hr>
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from time import time
from joblib import Parallel, delayed
import multiprocessing
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
from scipy.spatial.distance import cdist, pdist
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
from sklearn.covariance import EmpiricalCovariance, MinCovDet
%%time
known = pd.read_csv('../data/known.csv')
rogues = pd.read_csv('../data/rogues.csv')
transactions = pd.read_csv('../data/edges.csv').drop('Unnamed: 0',1)
#Dropping features and fill na with 0
df = pd.read_csv('../data/features_full.csv').drop('Unnamed: 0',1).fillna(0)
df = df.set_index(['nodes'])
#build normalize values
data = scale(df.values)
n_sample = 10000
#Define estimator / by default clusters = 6 an init = 10
#kmeans = KMeans(init='k-means++', n_clusters=6, n_init=10)
#kmeans.fit(data)
#Quick PCA for k selection
X = PCA(n_components=2).fit_transform(data)
%%time
#Determine your k range
k_range = range(1,14)
# Fit the kmeans model for each n_clusters = k
k_means_var = [KMeans(n_clusters=k).fit(X) for k in k_range]
# Pull out the centroids for each model
centroids = [X.cluster_centers_ for X in k_means_var]
X
%%time
# Caluculate the Euclidean distance from each pont to each centroid
k_euclid=[cdist(X, cent, 'euclidean') for cent in centroids]
dist = [np.min(ke,axis=1) for ke in k_euclid]
# Total within-cluster sum of squares
wcss = [sum(d**2) for d in dist]
# The total sum of squares
tss = sum(pdist(X)**2)/X.shape[0]
#The between-cluster sum of squares
bss = tss - wcss
%%time
plt.plot(k_range,bss/tss,'-bo')
plt.xlabel('number of cluster')
plt.ylabel('% of variance explained')
plt.title('Variance explained vs k')
plt.grid(True)
plt.show()
np.sqrt(data.shape[0]/2)
batch_size = 10
n_clusters = 6
#PCA
X = PCA(n_components=2).fit_transform(data)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=6, n_init=10,random_state=2)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=6, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0,random_state=2)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(15, 5))
colors = ['#4EACC5', '#FF9C34', '#4E9A06','#FF0000','#800000','purple']
#fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.',markersize=10)
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
#plt.text(10,10, 'train time: %.2fs\ninertia: %f' % (
#t_batch, k_means.inertia_))
# Plot result
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.', markersize=10)
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
#plt.text(-5, 10, 'train time: %.2fs\ninertia: %f' %
#(t_mini_batch, mbk.inertia_))
# Plot result
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for l in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
fig2 = plt.figure(figsize=(15, 10))
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.',markersize=13)
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=8)
plt.title('KMeans')
plt.show()
X = PCA(n_components=2).fit_transform(data)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
robust_cov = MinCovDet().fit(X)
###############################################################################
# Display results
fig = plt.figure(figsize=(15, 8))
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(1, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='points')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
plt.xticks(())
plt.yticks(())
plt.show()
k_means = KMeans(init='random', n_clusters=6, n_init=10, random_state=2)
clusters = k_means.fit_predict(data)
df['clusters'] = clusters
tagged = pd.merge(known,df,left_on='id',how='inner',right_index=True)
rogues_tag = pd.merge(rogues,df,left_on='id',how='inner',right_index=True)
# Per-cluster node share (%) for the whole graph vs. the known and rogue populations.
distrib = pd.DataFrame(df.groupby('clusters').count().apply(lambda x: 100*x/float(x.sum()))['total_degree'].values,columns=['Global'])
distrib['Known']=tagged.groupby('clusters').count().apply(lambda x: 100*x/float(x.sum()))['id']
distrib['Rogues']=rogues_tag.groupby('clusters').count().apply(lambda x: 100*x/float(x.sum()))['id']
distrib['Clusters']=distrib.index
distrib
# Fix: corrected "Distriubtion" typo in the chart title.
distrib.get(['Global', 'Known','Rogues','Clusters']).groupby(['Clusters']).mean().plot(kind='bar',title='Cluster Distribution per Population');
#Several Insights, Nature of the clusters
df.groupby('clusters').mean()
#write function
def get_cluster(node, df):
    """Return the cluster id assigned to *node* (a row label) in *df*."""
    return df.loc[node, 'clusters']
#Tag from node
%%time
transactions['cluster_from'] = transactions['from'].map(lambda x: get_cluster(x,df))
#Tag to node
%%time
transactions['cluster_to'] = transactions['to'].map(lambda x: get_cluster(x,df))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We initialise the ElasticNetCV object for each implementation using the default parameters. This means l1_ratio = 0.5 and alpha takes 100 values on the interval from 0.001333 to 1.333.
Step2: Dataset
Step3: The plot below demonstrates that the weight vectors generated by this formula are very sparse.
Step4: Simulations
Step5: On these datasets, coordinate descent runs between 1 - 3 times faster than the accelerated proximal gradient method. This is to be expected since the weight vector is very sparse (as demonstrated above), and coordinate descent can take advantage of this sparsity.
Step6: We redefine the generate_data function to use the more dense weight vector rule.
Step7: Below we repeat all of the experiments using the new dataset.
|
<ASSISTANT_TASK:>
Python Code:
import time
import numpy as np
import copy
from sklearn.linear_model import ElasticNetCV as ElasticNetCV_sk
from prox_elasticnet import ElasticNetCV as ElasticNetCV_px
np.random.seed(319159)
from sklearn import __version__ as sklearn_version
print("Using sklearn version {}.".format(sklearn_version))
ecv_sk = ElasticNetCV_sk(max_iter = 50000)
ecv_px = ElasticNetCV_px(max_iter = 50000)
def generate_data(n, k, rho):
    """Draw an (X, y) regression problem with equicorrelated Gaussian features.

    X ~ N(0, Sigma) with unit diagonal and off-diagonal correlation *rho*;
    y = Xw + noise, where w_i = (-1)^i * exp(-pi/40 * (i-1)^2) (a sparse,
    alternating-sign weight vector) and the noise scale is tied to ||w|| so
    the signal-to-noise ratio stays fixed.
    """
    t_start = time.time()
    # Equicorrelation covariance: ones on the diagonal, rho everywhere else.
    cov = np.full((k, k), rho)
    np.fill_diagonal(cov, 1)
    X = np.random.multivariate_normal(np.zeros(k), cov, n)
    w = np.fromfunction(
        lambda i: (-1) ** i * np.exp(-np.pi / 40.0 * (i - 1) ** 2),
        (k,), dtype=float)
    noise = np.random.normal(0, 1, n)
    sigma = np.sqrt(w @ w) / (2 * np.sqrt(2))
    y = X @ w + sigma * noise
    print("Generated data in {:.4f} s\n".format(time.time() - t_start))
    return X, y
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(np.fromfunction(lambda i: (-1)**i * np.exp(- np.pi/40.0 * (i - 1)**2), (100,), dtype=float), bins=101)
plt.xlabel("w_i")
plt.ylabel("Frequency")
plt.title("Distribution of weight vector entries (k = 100)")
plt.show()
def repeat_and_time(enet_obj, X, y, n_repeats):
    """Fit a deep copy of *enet_obj* on (X, y) *n_repeats* times, timing each fit.

    Prints a summary and returns (fitted_copy, mean_time, standard_error).
    """
    model = copy.deepcopy(enet_obj)
    timings = np.empty(n_repeats, dtype=float)
    for rep in range(n_repeats):
        start = time.process_time()
        model.fit(X, y)
        timings[rep] = time.process_time() - start
    t_avg = np.mean(timings)
    t_stderr = np.std(timings) / np.sqrt(n_repeats)
    # The prox_elasticnet implementation exposes an `eta` attribute; sklearn's does not.
    obj_str = "px" if hasattr(model, 'eta') else "sk"
    print("Method: {}".format(obj_str))
    print("----------")
    print("Repeated fit method {} times.".format(n_repeats))
    print("Average time: {:.4f} s \t Standard error: {:.4f} s\n".format(t_avg, t_stderr))
    return model, t_avg, t_stderr
n_repeat = 40
X_1, y_1 = generate_data(n = 10000, k = 100, rho = 0)
ecv_sk_1, t_avg_sk_1, t_se_px_1 = repeat_and_time(ecv_sk, X_1, y_1, n_repeat)
ecv_px_1, t_avg_px_1, t_se_px_1 = repeat_and_time(ecv_px, X_1, y_1, n_repeat)
X_2, y_2 = generate_data(n = 10000, k = 100, rho = 0.5)
ecv_sk_2, t_avg_sk_2, t_se_sk_2 = repeat_and_time(ecv_sk, X_2, y_2, n_repeat)
ecv_px_2, t_avg_px_2, t_se_px_2 = repeat_and_time(ecv_px, X_2, y_2, n_repeat)
X_3, y_3 = generate_data(n = 100, k = 10000, rho = 0)
ecv_sk_3, t_avg_sk_3, t_se_sk_3 = repeat_and_time(ecv_sk, X_3, y_3, n_repeat)
ecv_px_3, t_avg_px_3, t_se_px_3 = repeat_and_time(ecv_px, X_3, y_3, n_repeat)
X_4, y_4 = generate_data(n = 100, k = 10000, rho = 0.5)
ecv_sk_4, t_avg_sk_4, t_se_sk_4 = repeat_and_time(ecv_sk, X_4, y_4, n_repeat)
ecv_px_4, t_avg_px_4, t_se_px_4 = repeat_and_time(ecv_px, X_4, y_4, n_repeat)
plt.hist(10*np.fromfunction(lambda i: (-1)**i * np.sqrt(np.abs(np.sin(np.pi * i/20)) * i/100), (100,), dtype=float), bins=101)
plt.xlabel("w_i")
plt.ylabel("Frequency")
plt.title("Distribution of weight vector entries (k = 100)")
plt.show()
def generate_data(n, k, rho):
    """Draw an (X, y) regression problem with a dense weight vector.

    Same equicorrelated Gaussian features as the earlier generator, but
    w_i = 10 * (-1)^i * sqrt(|sin(pi*i/20)| * i/k), so most weights are far
    from zero and the solution is not sparse.
    """
    t_start = time.time()
    cov = np.full((k, k), rho)
    np.fill_diagonal(cov, 1)
    X = np.random.multivariate_normal(np.zeros(k), cov, n)
    w = np.fromfunction(
        lambda i: 10 * (-1) ** i * np.sqrt(np.abs(np.sin(np.pi * i / 20)) * i / k),
        (k,), dtype=float)
    noise = np.random.normal(0, 1, n)
    sigma = np.sqrt(w @ w) / (2 * np.sqrt(2))
    y = X @ w + sigma * noise
    print("Generated data in {:.4f} s\n".format(time.time() - t_start))
    return X, y
n_repeat = 10
X_1, y_1 = generate_data(n = 10000, k = 100, rho = 0)
ecv_sk_1, t_avg_sk_1, t_se_px_1 = repeat_and_time(ecv_sk, X_1, y_1, n_repeat)
ecv_px_1, t_avg_px_1, t_se_px_1 = repeat_and_time(ecv_px, X_1, y_1, n_repeat)
X_2, y_2 = generate_data(n = 10000, k = 100, rho = 0.5)
ecv_sk_2, t_avg_sk_2, t_se_sk_2 = repeat_and_time(ecv_sk, X_2, y_2, n_repeat)
ecv_px_2, t_avg_px_2, t_se_px_2 = repeat_and_time(ecv_px, X_2, y_2, n_repeat)
X_3, y_3 = generate_data(n = 100, k = 10000, rho = 0)
ecv_sk_3, t_avg_sk_3, t_se_sk_3 = repeat_and_time(ecv_sk, X_3, y_3, n_repeat)
ecv_px_3, t_avg_px_3, t_se_px_3 = repeat_and_time(ecv_px, X_3, y_3, n_repeat)
X_4, y_4 = generate_data(n = 100, k = 10000, rho = 0.5)
ecv_sk_4, t_avg_sk_4, t_se_sk_4 = repeat_and_time(ecv_sk, X_4, y_4, n_repeat)
ecv_px_4, t_avg_px_4, t_se_px_4 = repeat_and_time(ecv_px, X_4, y_4, n_repeat)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How we handle multiple -and conflictive- objectives?
Step2: Planting a constant seed to always have the same results (and avoid surprises in class). -you should not do this in a real-world case!
Step3: To start, lets have a visual example of the Pareto dominance relationship in action.
Step5: An illustrative MOP
Step6: Preparing a DEAP toolbox with Dent.
Step7: Defining attributes, individuals and population.
Step8: Creating an example population distributed as a mesh.
Step9: Visualizing Dent
Step10: We also need a_given_individual.
Step11: Implementing the Pareto dominance relation between two individuals.
Step12: Note
Step13: Lets compute the set of individuals that are dominated by a_given_individual, the ones that dominate it (its dominators) and the remaining ones.
Step14: Having a_given_individual (blue dot) we can now plot those that are dominated by it (in green), those that dominate it (in red) and those that are incomparable with it.
Step15: Obtaining the nondominated front.
Step16: So, is this the end?
Step17: Describing attributes, individuals and population and defining the selection, mating and mutation operators.
Step18: Let's also use the toolbox to store other configuration parameters of the algorithm. This will show itself usefull when performing massive experiments.
Step19: A compact NSGA-II implementation
Step20: Running the algorithm
Step21: We can now get the Pareto fronts in the results (res).
Step22: Resulting Pareto fronts
Step23: It is better to make an animated plot of the evolution as it takes place.
Step24: Re-run the algorithm to get the data necessary for plotting.
Step25: The previous animation makes the notebook too big for online viewing. To circumvent this, it is better to save the animation as video and (manually) upload it to YouTube.
Step26: Here it is clearly visible how the algorithm "jumps" from one local-optimum to a better one as evolution takes place.
Step27: DTLZ7 has many disconnected Pareto-optimal fronts.
Step28: How does our NSGA-II behaves when faced with different benchmark problems?
Step29: Running NSGA-II solving all problems. Now it takes longer.
Step30: Creating this animation takes more programming effort.
Step31: Saving the animation as video and uploading it to YouTube.
Step32: It is interesting how the algorithm deals with each problem
Step33: We add a experiment_name to toolbox that we will fill up later on.
Step34: We can now replicate this toolbox instance and then modify the mutation probabilities.
Step35: Now toolboxes is a list of copies of the same toolbox. One for each experiment configuration (population size).
Step36: Experiment design
Step37: Running experiments in parallel
Step38: A side-effect of using process-based parallelization
Step39: All set! Run the experiments...
Step40: As you can see, even this relatively small experiment took lots of time!
Step41: In case you need it, this file is included in the github repository.
Step42: results is a dictionary, but a pandas DataFrame is a more handy container for the results.
Step43: A first glace at the results
Step44: The local Pareto-optimal fronts are clearly visible!
Step45: We can now compute the hypervolume of the Pareto-optimal fronts yielded by each algorithm run.
Step46: How can we interpret the indicators?
Step47: Option B
Step48: Option C
Step49: The Kruskal-Wallis H-test tests the null hypothesis that the population median of all of the groups are equal.
Step50: We can now assert that the results are not all the same — but which ones differ from which?
Step51: We now know in what cases the difference is sufficient as to say that one result is better than the other.
Step52: Mann–Whitney U test (also called the Mann–Whitney–Wilcoxon (MWW), Wilcoxon rank-sum test (WRS), or Wilcoxon–Mann–Whitney test) is a nonparametric test of the null hypothesis that two populations are the same against an alternative hypothesis, especially that a particular population tends to have larger values than the other.
Step53: The familywise error rate (FWER) is the probability of making one or more false discoveries, or type I errors, among all the hypotheses when performing multiple hypotheses tests.
Step54: Let's apply the corrected alpha to raw_p_values. If we have a cell with a True value that means that those two results are the same.
|
<ASSISTANT_TASK:>
Python Code:
import time, array, random, copy, math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
from deap import algorithms, base, benchmarks, tools, creator
random.seed(a=42)
creator.create("FitnessMin", base.Fitness, weights=(-1.0,-1.0))
creator.create("Individual", array.array, typecode='d',
fitness=creator.FitnessMin)
def dent(individual, lbda=0.85):
    """Bi-objective 'Dent' test problem (two variables, bounds [-1.5, 1.5]).

    Both objectives share a common term and differ only in the sign of
    (x0 - x1); *lbda* controls the depth of the Gaussian 'dent' along the
    x0 == x1 diagonal.  Returns the tuple (f1, f2).
    """
    x0, x1 = individual[0], individual[1]
    d = lbda * math.exp(-(x0 - x1) ** 2)
    common = math.sqrt(1 + (x0 + x1) ** 2) + math.sqrt(1 + (x0 - x1) ** 2)
    f1 = 0.5 * (common + x0 - x1) + d
    f2 = 0.5 * (common - x0 + x1) + d
    return f1, f2
toolbox = base.Toolbox()
BOUND_LOW, BOUND_UP = -1.5, 1.5
NDIM = 2
toolbox.register("evaluate", dent)
def uniform(low, up, size=None):
    """Sample one uniform value per dimension between the given bounds.

    *low*/*up* may be per-gene sequences of bounds, or scalars (in which
    case *size* gives the number of dimensions).
    """
    try:
        bounds = list(zip(low, up))
    except TypeError:
        # Scalar bounds: replicate them across all `size` dimensions.
        bounds = [(low, up)] * size
    return [random.uniform(lo, hi) for lo, hi in bounds]
toolbox.register("attr_float", uniform, BOUND_LOW, BOUND_UP, NDIM)
toolbox.register("individual", tools.initIterate,
creator.Individual, toolbox.attr_float)
toolbox.register("population", tools.initRepeat, list,
toolbox.individual)
num_samples = 50
limits = [np.arange(BOUND_LOW, BOUND_UP, (BOUND_UP - BOUND_LOW)/num_samples)] * NDIM
sample_x = np.meshgrid(*limits)
flat = []
for i in range(len(sample_x)):
x_i = sample_x[i]
flat.append(x_i.reshape(num_samples**NDIM))
example_pop = toolbox.population(n=num_samples**NDIM)
for i, ind in enumerate(example_pop):
for j in range(len(flat)):
ind[j] = flat[j][i]
fitnesses = toolbox.map(toolbox.evaluate, example_pop)
for ind, fit in zip(example_pop, fitnesses):
ind.fitness.values = fit
plt.figure(figsize=(11,5))
plt.subplot(1,2,1)
for ind in example_pop: plt.plot(ind[0], ind[1], 'k.', ms=3)
plt.xlabel('$x_1$');plt.ylabel('$x_2$');plt.title('Decision space');
plt.subplot(1,2,2)
for ind in example_pop: plt.plot(ind.fitness.values[0], ind.fitness.values[1], 'k.', ms=3)
plt.xlabel('$f_1(\mathbf{x})$');plt.ylabel('$f_2(\mathbf{x})$');
plt.xlim((0.5,3.6));plt.ylim((0.5,3.6)); plt.title('Objective space');
a_given_individual = toolbox.population(n=1)[0]
a_given_individual[0] = 0.5
a_given_individual[1] = 0.5
a_given_individual.fitness.values = toolbox.evaluate(a_given_individual)
def pareto_dominance(ind1, ind2):
    """Return True if *ind1* Pareto-dominates *ind2* (minimization).

    *ind1* dominates *ind2* when it is no worse in every objective and
    strictly better in at least one.  Objectives must be compared pairwise
    (objective k of ind1 against objective k of ind2); the previous
    nested-loop version compared every objective of one individual against
    every objective of the other, wrongly rejecting e.g. (0, 5) vs (1, 6).
    """
    strictly_better = False
    for v1, v2 in zip(ind1.fitness.values, ind2.fitness.values):
        if v1 > v2:
            # Worse in some objective: ind1 cannot dominate ind2.
            return False
        if v1 < v2:
            strictly_better = True
    return strictly_better
# NOTE(review): DEAP's tools.emo.isDominated(wvalues1, wvalues2) is defined on
# *weighted* fitness values (maximization semantics) and reports whether its
# FIRST argument is dominated by the second.  Passing raw minimization values
# in (ind1, ind2) order may therefore invert the relation relative to
# pareto_dominance(ind1, ind2) above -- verify against the installed DEAP
# version before relying on this shortcut.
def efficient_pareto_dominance(ind1, ind2):
    return tools.emo.isDominated(ind1.fitness.values, ind2.fitness.values)
dominated = [ind for ind in example_pop
if pareto_dominance(a_given_individual, ind)]
dominators = [ind for ind in example_pop
if pareto_dominance(ind, a_given_individual)]
others = [ind for ind in example_pop
if not ind in dominated and not ind in dominators]
def plot_dent():
    """Plot the dominance relation around `a_given_individual` for Dent.

    Reads the module-level lists `dominators` (red), `dominated` (green) and
    `others` (black, incomparable) plus `a_given_individual` (blue), and draws
    them side by side in decision space and objective space.
    """
    plt.figure(figsize=(10,5))
    # Left panel: positions of the individuals in decision (x1, x2) space.
    plt.subplot(1,2,1)
    for ind in dominators: plt.plot(ind[0], ind[1], 'r.')
    for ind in dominated: plt.plot(ind[0], ind[1], 'g.')
    for ind in others: plt.plot(ind[0], ind[1], 'k.', ms=3)
    plt.plot(a_given_individual[0], a_given_individual[1], 'bo', ms=6);
    plt.xlabel('$x_1$');plt.ylabel('$x_2$');
    plt.title('Decision space');
    # Right panel: the same individuals mapped to objective (f1, f2) space.
    plt.subplot(1,2,2)
    for ind in dominators: plt.plot(ind.fitness.values[0], ind.fitness.values[1], 'r.', alpha=0.7)
    for ind in dominated: plt.plot(ind.fitness.values[0], ind.fitness.values[1], 'g.', alpha=0.7)
    for ind in others: plt.plot(ind.fitness.values[0], ind.fitness.values[1], 'k.', alpha=0.7, ms=3)
    plt.plot(a_given_individual.fitness.values[0], a_given_individual.fitness.values[1], 'bo', ms=6);
    plt.xlabel('$f_1(\mathbf{x})$');plt.ylabel('$f_2(\mathbf{x})$');
    plt.xlim((0.5,3.6));plt.ylim((0.5,3.6));
    plt.title('Objective space');
    plt.tight_layout()
plot_dent()
non_dom = tools.sortNondominated(example_pop, k=len(example_pop),
first_front_only=True)[0]
plt.figure(figsize=(5,5))
for ind in example_pop:
plt.plot(ind.fitness.values[0], ind.fitness.values[1], 'k.', ms=3, alpha=0.5)
for ind in non_dom:
plt.plot(ind.fitness.values[0], ind.fitness.values[1], 'bo', alpha=0.74, ms=5)
toolbox = base.Toolbox()
BOUND_LOW, BOUND_UP = 0.0, 1.0
toolbox.register("evaluate", lambda ind: benchmarks.dtlz3(ind, 2))
toolbox.register("attr_float", uniform, BOUND_LOW, BOUND_UP, NDIM)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0)
toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0, indpb=1.0/NDIM)
toolbox.register("select", tools.selNSGA2)
toolbox.pop_size = 50
toolbox.max_gen = 500
toolbox.mut_prob = 0.2
def nsga_ii(toolbox, stats=None, verbose=False):
    """Run a (mu + lambda) NSGA-II using the settings stored on *toolbox*.

    Expects the toolbox to carry `pop_size`, `max_gen` and `mut_prob`
    attributes in addition to the registered DEAP operators, and returns
    the (final_population, logbook) pair from eaMuPlusLambda.
    """
    # The initial select() pass assigns the rank / crowding-distance
    # attributes that selNSGA2 needs from the very first generation.
    population = toolbox.select(toolbox.population(n=toolbox.pop_size),
                                toolbox.pop_size)
    return algorithms.eaMuPlusLambda(
        population, toolbox,
        mu=toolbox.pop_size, lambda_=toolbox.pop_size,
        cxpb=1 - toolbox.mut_prob, mutpb=toolbox.mut_prob,
        ngen=toolbox.max_gen, stats=stats, verbose=verbose)
%time res, logbook = nsga_ii(toolbox)
fronts = tools.emo.sortLogNondominated(res, len(res))
plot_colors = ('b','r', 'g', 'm', 'y', 'k', 'c')
fig, ax = plt.subplots(1, figsize=(4,4))
for i,inds in enumerate(fronts):
par = [toolbox.evaluate(ind) for ind in inds]
df = pd.DataFrame(par)
df.plot(ax=ax, kind='scatter', label='Front ' + str(i+1),
x=df.columns[0], y=df.columns[1],
color=plot_colors[i % len(plot_colors)])
plt.xlabel('$f_1(\mathbf{x})$');plt.ylabel('$f_2(\mathbf{x})$');
stats = tools.Statistics()
stats.register("pop", copy.deepcopy)
toolbox.max_gen = 4000 # we need more generations!
%time res, logbook = nsga_ii(toolbox, stats=stats)
from JSAnimation import IPython_display
import matplotlib.colors as colors
from matplotlib import animation
def animate(frame_index, logbook):
    """Redraw the module-level axes `ax` with the fronts of generation *frame_index*.

    Pulls the population snapshot recorded in *logbook* for that generation,
    sorts it into nondomination fronts, and scatters each front in a distinct
    colour.  Relies on the module-level `ax`, `toolbox` and `plot_colors`.
    """
    ax.clear()
    # Snapshot of the population at this generation, split into fronts.
    fronts = tools.emo.sortLogNondominated(logbook.select('pop')[frame_index],
                                           len(logbook.select('pop')[frame_index]))
    for i,inds in enumerate(fronts):
        # Re-evaluate each individual to obtain its objective-space coordinates.
        par = [toolbox.evaluate(ind) for ind in inds]
        df = pd.DataFrame(par)
        df.plot(ax=ax, kind='scatter', label='Front ' + str(i+1),
                x=df.columns[0], y =df.columns[1], alpha=0.47,
                color=plot_colors[i % len(plot_colors)])
    ax.set_title('$t=$' + str(frame_index))
    ax.set_xlabel('$f_1(\mathbf{x})$');ax.set_ylabel('$f_2(\mathbf{x})$')
    return None
fig = plt.figure(figsize=(4,4))
ax = fig.gca()
anim = animation.FuncAnimation(fig, lambda i: animate(i, logbook),
frames=len(logbook), interval=60,
blit=True)
anim
anim.save('nsgaii-dtlz3.mp4', fps=15, bitrate=-1, dpi=500)
from IPython.display import YouTubeVideo
YouTubeVideo('Cm7r4cJq59s')
def dtlz5(ind, n_objs):
    """DTLZ5 benchmark (degenerate Pareto front), minimization.

    The first gene drives the first objective directly, intermediate genes
    are squeezed through the theta() mapping, and the tail genes feed only
    the distance term g.  Returns a list of n_objs objective values.
    """
    from functools import reduce
    # Distance of the tail genes from 0.5 (zero on the true front).
    gval = sum([(a - 0.5) ** 2 for a in ind[n_objs - 1:]])
    theta = lambda x: math.pi / (4.0 * (1 + gval)) * (1 + 2 * gval * x)
    scale = 1 + gval
    cos_first = math.cos(math.pi / 2.0 * ind[0])
    prod = lambda vals, *init: reduce(lambda x, y: x * y, vals, *init)
    fit = [scale * cos_first * prod([math.cos(theta(a)) for a in ind[1:]])]
    for m in reversed(range(1, n_objs)):
        if m == 1:
            fit.append(scale * math.sin(math.pi / 2.0 * ind[0]))
        else:
            fit.append(scale * cos_first *
                       prod([math.cos(theta(a)) for a in ind[1:m - 1]], 1) *
                       math.sin(theta(ind[m - 1])))
    return fit
def dtlz6(ind, n_objs):
    """DTLZ6 benchmark, minimization.

    Structurally the same as dtlz5, but the distance term sums a**0.1 over
    the tail genes, which makes convergence to the true front much harder.
    """
    from functools import reduce
    gval = sum([a ** 0.1 for a in ind[n_objs - 1:]])
    theta = lambda x: math.pi / (4.0 * (1 + gval)) * (1 + 2 * gval * x)
    scale = 1 + gval
    cos_first = math.cos(math.pi / 2.0 * ind[0])
    prod = lambda vals, *init: reduce(lambda x, y: x * y, vals, *init)
    fit = [scale * cos_first * prod([math.cos(theta(a)) for a in ind[1:]])]
    for m in reversed(range(1, n_objs)):
        if m == 1:
            fit.append(scale * math.sin(math.pi / 2.0 * ind[0]))
        else:
            fit.append(scale * cos_first *
                       prod([math.cos(theta(a)) for a in ind[1:m - 1]], 1) *
                       math.sin(theta(ind[m - 1])))
    return fit
def dtlz7(ind, n_objs):
    """DTLZ7 benchmark (disconnected Pareto front), minimization.

    The first n_objs-1 objectives are the leading genes themselves; the
    last objective multiplies (1 + g) by a sine-modulated term that splits
    the front into disconnected regions.
    """
    tail = ind[n_objs - 1:]
    gval = 1 + 9.0 / len(tail) * sum(tail)
    fit = list(ind[:n_objs - 1])
    penalty = sum([a / (1.0 + gval) * (1 + math.sin(3 * math.pi * a))
                   for a in ind[:n_objs - 1]])
    fit.append((1 + gval) * (n_objs - penalty))
    return fit
problem_instances = {'ZDT1': benchmarks.zdt1, 'ZDT2': benchmarks.zdt2,
'ZDT3': benchmarks.zdt3, 'ZDT4': benchmarks.zdt4,
'DTLZ1': lambda ind: benchmarks.dtlz1(ind,2),
'DTLZ2': lambda ind: benchmarks.dtlz2(ind,2),
'DTLZ3': lambda ind: benchmarks.dtlz3(ind,2),
'DTLZ4': lambda ind: benchmarks.dtlz4(ind,2, 100),
'DTLZ5': lambda ind: dtlz5(ind,2),
'DTLZ6': lambda ind: dtlz6(ind,2),
'DTLZ7': lambda ind: dtlz7(ind,2)}
toolbox.max_gen = 1000
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("obj_vals", np.copy)
def run_problem(toolbox, problem):
    """Point the toolbox's evaluate operator at *problem* and run NSGA-II.

    NOTE: re-registering 'evaluate' mutates the shared toolbox in place, so
    successive calls overwrite each other's objective function.  Uses the
    module-level `stats` object to record per-generation data.
    """
    toolbox.register('evaluate', problem)
    return nsga_ii(toolbox, stats=stats)
%time results = {problem: run_problem(toolbox, problem_instances[problem]) \
for problem in problem_instances}
class MultiProblemAnimation:
    """Animate NSGA-II progress on several benchmark problems at once.

    One subplot per problem in a 3x4 grid; `init` is passed to matplotlib's
    FuncAnimation as `init_func` (it is NOT `__init__` -- instances are
    created bare and configured via this method) and `animate` as the
    per-frame callback.
    """
    def init(self, fig, results):
        # `results` maps problem name -> (final_population, logbook).
        self.results = results
        self.axs = [fig.add_subplot(3,4,i+1) for i in range(len(results))]
        self.plots =[]
        for i, problem in enumerate(sorted(results)):
            (res, logbook) = self.results[problem]
            # Seed each subplot with the generation-0 objective values.
            pop = pd.DataFrame(data=logbook.select('obj_vals')[0])
            plot = self.axs[i].plot(pop[0], pop[1], 'b.', alpha=0.47)[0]
            self.plots.append(plot)
        fig.tight_layout()
    def animate(self, t):
        """Update every subplot to show the populations of generation *t*."""
        # NOTE(review): iterates the module-level `results` global here while
        # `init` stored self.results -- presumably the same object; verify.
        for i, problem in enumerate(sorted(results)):
            #self.axs[i].clear()
            (res, logbook) = self.results[problem]
            pop = pd.DataFrame(data=logbook.select('obj_vals')[t])
            self.plots[i].set_data(pop[0], pop[1])
            self.axs[i].set_title(problem + '; $t=' + str(t)+'$')
            # Rescale axes to the current population, never below 1.
            self.axs[i].set_xlim((0, max(1,pop.max()[0])))
            self.axs[i].set_ylim((0, max(1,pop.max()[1])))
        return self.axs
mpa = MultiProblemAnimation()
fig = plt.figure(figsize=(14,6))
anim = animation.FuncAnimation(fig, mpa.animate, init_func=mpa.init(fig,results),
frames=toolbox.max_gen, interval=60, blit=True)
anim
anim.save('nsgaii-benchmarks.mp4', fps=15, bitrate=-1, dpi=500)
YouTubeVideo('8t-aWcpDH0U')
toolbox = base.Toolbox()
BOUND_LOW, BOUND_UP = 0.0, 1.0
NDIM = 30
# the explanation of this is a few lines below: a named function is needed here instead of a lambda
def eval_helper(ind):
    """Evaluate *ind* on the 2-objective DTLZ3 benchmark.

    Defined as a named top-level function (rather than the lambdas used
    earlier in this notebook) so the registered toolbox can be pickled when
    work is dispatched to multiprocessing workers -- lambdas cannot be
    pickled.
    """
    return benchmarks.dtlz3(ind, 2)
toolbox.register("evaluate", eval_helper)
def uniform(low, up, size=None):
    """Generate one uniform sample per dimension between the given bounds."""
    try:
        pairs = zip(low, up)
    except TypeError:
        # Scalar bounds: expand them to `size` identical (low, up) pairs.
        pairs = zip([low] * size, [up] * size)
    return [random.uniform(lo, hi) for lo, hi in pairs]
toolbox.register("attr_float", uniform, BOUND_LOW, BOUND_UP, NDIM)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0)
toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0, indpb=1.0/NDIM)
toolbox.register("select", tools.selNSGA2)
toolbox.pop_size = 200
toolbox.max_gen = 500
toolbox.experiment_name = "$P_\mathrm{mut}="
mut_probs = (0.05, 0.15, 0.3)
number_of_experiments = len(mut_probs)
toolboxes=list([copy.copy(toolbox) for _ in range(number_of_experiments)])
for i, toolbox in enumerate(toolboxes):
toolbox.mut_prob = mut_probs[i]
toolbox.experiment_name = toolbox.experiment_name + str(mut_probs[i]) +'$'
for toolbox in toolboxes:
print(toolbox.experiment_name, toolbox.mut_prob)
number_of_runs = 42
from IPython.html import widgets
from IPython.display import display
progress_bar = widgets.IntProgressWidget(description="Starting...",
max=len(toolboxes)*number_of_runs)
def run_algo_wrapper(toolbox):
    """Run one NSGA-II replicate with *toolbox* and return its first Pareto front.

    Bug fix: the original signature was `run_algo_wrapper(toolboox)` (typo)
    and the body ignored the argument, silently reading the module-level
    `toolbox` global instead -- so every Pool.map worker ran whatever
    configuration happened to be bound globally rather than the one it was
    handed.  The argument is now actually used.
    """
    result, logbook = nsga_ii(toolbox)
    pareto_sets = tools.emo.sortLogNondominated(result, len(result))
    return pareto_sets[0]
%%time
from multiprocessing import Pool
display(progress_bar)
results = {}
pool = Pool()
for toolbox in toolboxes:
results[toolbox.experiment_name] = pool.map(run_algo_wrapper, [toolbox] * number_of_runs)
progress_bar.value +=number_of_runs
progress_bar.description = "Finished %03d of %03d:" % (progress_bar.value, progress_bar.max)
import pickle
pickle.dump(results, open('nsga_ii_dtlz3-results.pickle', 'wb'))
loaded_results = pickle.load(open('nsga_ii_dtlz3-results.pickle', 'rb'))
results = loaded_results # <-- (un)comment when needed
res = pd.DataFrame(results)
res.head()
a = res.applymap(lambda pop: [toolbox.evaluate(ind) for ind in pop])
plt.figure(figsize=(11,3))
for i, col in enumerate(a.columns):
plt.subplot(1, len(a.columns), i+1)
for pop in a[col]:
x = pd.DataFrame(data=pop)
plt.scatter(x[0], x[1], marker='.', alpha=0.5)
plt.title(col)
def calculate_reference(results, epsilon=0.1):
    """Compute a hypervolume reference point dominated by every solution found.

    Flattens all individuals from every run/configuration, evaluates them with
    the module-level `toolbox`, and returns the per-objective maximum plus
    *epsilon*.
    NOTE(review): `results.values` (no call) works for the DataFrame this is
    invoked with (`calculate_reference(res)`); a plain dict argument would
    need `results.values()` -- confirm the intended input type.
    """
    alldata = np.concatenate(np.concatenate(results.values))
    obj_vals = [toolbox.evaluate(ind) for ind in alldata]
    return np.max(obj_vals, axis=0) + epsilon
reference = calculate_reference(res)
reference
import deap.benchmarks.tools as bt
hypervols = res.applymap(lambda pop: bt.hypervolume(pop, reference))
hypervols.head()
hypervols.describe()
import seaborn
seaborn.set(style="whitegrid")
fig = plt.figure(figsize=(15,3))
plt.subplot(1,2,1, title='Violin plots of NSGA-II with $P_{\mathrm{mut}}$')
seaborn.violinplot(hypervols, alpha=0.74)
plt.ylabel('Hypervolume'); plt.xlabel('Mutation probabilities')
plt.subplot(1,2,2, title='Box plots of NSGA-II with $P_{\mathrm{mut}}$')
seaborn.boxplot(hypervols, alpha=0.74)
plt.ylabel('Hypervolume'); plt.xlabel('Mutation probabilities');
import itertools
import scipy.stats as stats
def compute_stat_matrix(data, stat_func, alpha=0.05):
    """Apply *stat_func* to every pair of columns in *data*.

    Parameters
    ----------
    data : pandas.DataFrame
        One column per experiment configuration, one row per run.
    stat_func : callable
        Two-sample test returning a (statistic, p_value) tuple,
        e.g. scipy.stats.mannwhitneyu.
    alpha : float
        Unused; kept for interface compatibility with existing callers.

    Returns
    -------
    pandas.DataFrame
        Square, symmetric matrix of p-values (diagonal left as NaN).
    """
    p_values = pd.DataFrame(columns=data.columns, index=data.columns)
    for a, b in itertools.combinations(data.columns, 2):
        _, p = stat_func(data[a], data[b])
        # `.loc[row, col]` replaces the original chained `[col].ix[row]`
        # assignment: `.ix` was removed in pandas 1.0, and chained
        # assignment is not guaranteed to write back to the frame.
        p_values.loc[b, a] = p
        p_values.loc[a, b] = p
    return p_values
stats.kruskal(*[hypervols[col] for col in hypervols.columns])
def conover_inman_procedure(data, alpha=0.05):
    """Pairwise post-hoc comparisons after a Kruskal-Wallis test (Conover-Iman).

    For every pair of columns (algorithm configurations) in *data* -- one
    column per configuration, one row per run -- decides whether their rank
    distributions differ significantly at level *alpha*.  Returns a square,
    symmetric boolean DataFrame (diagonal left as NaN).

    Fixes over the original version:
    * uses the Kruskal-Wallis H statistic (not its p-value) in the variance
      correction term, as the Conover-Iman formula requires;
    * uses the t quantile `t.ppf(1 - alpha/2, N - k)` instead of `t.cdf`;
    * compares the difference of MEAN ranks, abs(Ri - Rj) / num_runs (the
      original divided only the second rank sum, a precedence bug);
    * divides the correction term by (N - k) rather than (N - 1);
    * replaces the removed pandas `.ix` indexer with positional `.iloc`.
    """
    num_runs = len(data)
    num_algos = len(data.columns)
    N = num_runs * num_algos

    H, _ = stats.kruskal(*[data[col] for col in data.columns])

    # Rank all observations jointly, then accumulate per-column rank sums.
    ranked = stats.rankdata(np.concatenate([data[col] for col in data.columns]))
    ranksums = [np.sum(ranked[num_runs * i:num_runs * (i + 1)])
                for i in range(num_algos)]

    # Pooled variance of the ranks (equals N*(N+1)/12 when there are no ties).
    S_sq = (np.sum(ranked ** 2) - N * ((N + 1) ** 2) / 4) / (N - 1)

    # Critical difference for the mean ranks of two equal-size groups.
    right_side = stats.t.ppf(1 - alpha / 2, N - num_algos) * \
        math.sqrt(S_sq * (N - 1 - H) / (N - num_algos) * 2 / num_runs)

    res = pd.DataFrame(columns=data.columns, index=data.columns)
    for i, j in itertools.combinations(range(num_algos), 2):
        significant = abs(ranksums[i] - ranksums[j]) / num_runs > right_side
        res.iloc[j, i] = significant
        res.iloc[i, j] = significant
    return res
conover_inman_procedure(hypervols)
hyp_transp = hypervols.transpose()
measurements = [list(hyp_transp[col]) for col in hyp_transp.columns]
stats.friedmanchisquare(*measurements)
raw_p_values=compute_stat_matrix(hypervols, stats.mannwhitneyu)
raw_p_values
from scipy.misc import comb
alpha=0.05
alpha_sid = 1 - (1-alpha)**(1/comb(len(hypervols.columns), 2))
alpha_sid
raw_p_values.applymap(lambda value: value <= alpha_sid)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the Data
Step2: Labeling the Data
Step3: Preparing Input, Output 'Master' DataFrames
Step4: Preprocessing, continued
Step5: Exploratory Visualizations
Step6: AF Extractor Models
Step7: Preparing GridSearch and Assesing Stock MLPC as AF extractor models
Step8: Training the Solution and Benchmark Models
Step9: Benchmark Optimization
Step10: Solution Model
Step12: Various Stat Calculations
Step13: Sensitivity Testing
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import prepare_EMG, prepare_outputs, prepare_data, pandas
EMG_Prep = prepare_EMG.EMG_preparer()
Output_Prep = prepare_outputs.output_preparer()
Data_Prep = prepare_data.data_preparer()
singles_1 = Data_Prep.load_singletons(1)
singles_2 = Data_Prep.load_singletons(2)
singles_3 = Data_Prep.load_singletons(3)
# print(singles_1.keys())
from scipy import signal
import numpy as np
labels = {}
windows = {}
for word in singles_1:
try:
label = Output_Prep.transform(word)
num_phonemes = label.shape[0]
label = label.append(Output_Prep.transform(word))
label = label.append(Output_Prep.transform(word))
labels[word] = label
widths = np.linspace(0.01,10,50)
wt_out = signal.cwt(singles_1[word]['voltage'], signal.ricker, widths)
wt_out = pandas.DataFrame(wt_out).T
windows[word] = EMG_Prep.process(wt_out, num_phonemes, wavelets=True)
wt_out_2 = signal.cwt(singles_2[word]['voltage'], signal.ricker, widths)
wt_out_2 = pandas.DataFrame(wt_out_2).T
windows[word] = windows[word].append(EMG_Prep.process(wt_out_2, num_phonemes, wavelets=True))
wt_out_3 = signal.cwt(singles_3[word]['voltage'], signal.ricker, widths)
wt_out_3 = pandas.DataFrame(wt_out_3).T
windows[word] = windows[word].append(EMG_Prep.process(wt_out_3, num_phonemes, wavelets=True))
except Exception as inst:
print(inst)
import pandas
%autoreload 2
y = pandas.DataFrame()
X = pandas.DataFrame()
for word in labels:
# append labels to the master label dataframe
label_frame = labels[word]
y = y.append(label_frame)
# Use phonemes to name each series in 'windows' for that word
window_frame = windows[word]
if len(label_frame.axes[0]):
window_frame = window_frame.rename_axis(lambda x: label_frame.axes[0][x])
X = X.append(window_frame)
else:
print('no labels for:',word)
print(y.head(),X.head())
# print(X.head(18), y.head(18))
from sklearn.preprocessing import scale,normalize
from sklearn.decomposition import PCA
X_scaled = scale(X)
pca = PCA(n_components=10, random_state=9)
X_reduced = pca.fit_transform(X_scaled)
X_normalized = normalize(X_reduced)
X_normalized = pandas.DataFrame(X_normalized)
X_normalized = X_normalized.rename_axis(lambda x: 'pc-'+str(x), axis='columns')
print(X_normalized)
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
sig = singles_1['advice']['voltage']
length = len(sig)
dur = singles_1['advice']['time'][length-1]
widths = np.linspace(.01,10,50)
wt_out = signal.cwt(sig, signal.ricker, widths)
# print (wt_out, wt_out.shape)
plt.imshow(wt_out, extent=[0, dur, 10, .01],cmap='PRGn',aspect='auto',vmax=abs(wt_out).max(), vmin=-abs(wt_out).max())
plt.show()
wt_out_frame = pandas.DataFrame(wt_out).T
# print(wt_out_frame.head())
sig = singles_2['advice']['voltage']
length = len(sig)
dur = singles_2['advice']['time'][length-1]
widths = np.linspace(.01,10,50)
wt_out = signal.cwt(sig, signal.ricker, widths)
# print (wt_out, wt_out.shape)
plt.imshow(wt_out, extent=[0, dur, 10, .01],cmap='PRGn',aspect='auto',vmax=abs(wt_out).max(), vmin=-abs(wt_out).max())
plt.show()
wt_out_frame = pandas.DataFrame(wt_out).T
sig = singles_3['advice']['voltage']
length = len(sig)
dur = singles_3['advice']['time'][length-1]
widths = np.linspace(.01,10,50)
wt_out = signal.cwt(sig, signal.ricker, widths)
# print (wt_out, wt_out.shape)
plt.imshow(wt_out, extent=[0, dur, 10, .01],cmap='PRGn',aspect='auto',vmax=abs(wt_out).max(), vmin=-abs(wt_out).max())
plt.show()
wt_out_frame = pandas.DataFrame(wt_out).T
sig = singles_1['aspiring']['voltage']
length = len(sig)
dur = singles_1['aspiring']['time'][length-1]
widths = np.linspace(.01,10,50)
wt_out = signal.cwt(sig, signal.ricker, widths)
# print (wt_out, wt_out.shape)
plt.imshow(wt_out, extent=[0, dur, 10, .01],cmap='PRGn',aspect='auto',vmax=abs(wt_out).max(), vmin=-abs(wt_out).max())
plt.show()
wt_out_frame = pandas.DataFrame(wt_out).T
# print(wt_out_frame.head())
sig = singles_2['aspiring']['voltage']
length = len(sig)
dur = singles_2['aspiring']['time'][length-1]
widths = np.linspace(.01,10,50)
wt_out = signal.cwt(sig, signal.ricker, widths)
# print (wt_out, wt_out.shape)
plt.imshow(wt_out, extent=[0, dur, 10, .01],cmap='PRGn',aspect='auto',vmax=abs(wt_out).max(), vmin=-abs(wt_out).max())
plt.show()
wt_out_frame = pandas.DataFrame(wt_out).T
sig = singles_3['aspiring']['voltage']
length = len(sig)
dur = singles_3['aspiring']['time'][length-1]
widths = np.linspace(.01,10,50)
wt_out = signal.cwt(sig, signal.ricker, widths)
# print (wt_out, wt_out.shape)
plt.imshow(wt_out, extent=[0, dur, 10, .01],cmap='PRGn',aspect='auto',vmax=abs(wt_out).max(), vmin=-abs(wt_out).max())
plt.show()
wt_out_frame = pandas.DataFrame(wt_out).T
sig3 = singles_1['weather']['voltage']
length = len(sig)
dur = singles_1['weather']['time'][length-1]
widths = np.linspace(.01,10,50)
wt_out = signal.cwt(sig3, signal.ricker, widths)
# print (wt_out, wt_out.shape)
plt.imshow(wt_out, extent=[0, dur, 10, .01],cmap='PRGn',aspect='auto',vmax=abs(wt_out).max(), vmin=-abs(wt_out).max())
plt.show()
sig3 = singles_2['weather']['voltage']
length = len(sig)
dur = singles_2['weather']['time'][length-1]
widths = np.linspace(.01,10,50)
wt_out = signal.cwt(sig3, signal.ricker, widths)
# print (wt_out, wt_out.shape)
plt.imshow(wt_out, extent=[0, dur, 10, .01],cmap='PRGn',aspect='auto',vmax=abs(wt_out).max(), vmin=-abs(wt_out).max())
plt.show()
sig3 = singles_3['weather']['voltage']
length = len(sig)
dur = singles_3['weather']['time'][length-1]
widths = np.linspace(.01,10,50)
wt_out = signal.cwt(sig3, signal.ricker, widths)
# print (wt_out, wt_out.shape)
plt.imshow(wt_out, extent=[0, dur, 10, .01],cmap='PRGn',aspect='auto',vmax=abs(wt_out).max(), vmin=-abs(wt_out).max())
plt.show()
# Prepare lists of parameters for our GridSearch
# First, our layer sizes
layer_sizes = []
for i in range(2,5):
for j in range(0,180,30):
if j:
tup = []
for k in range(i):
tup.append(j)
layer_sizes.append(tuple(tup))
print('number layer sizes:',len(layer_sizes),'here be layer sizes',layer_sizes)
# Next, our alpha values
alphas = [0.0001,1,1000]
from sklearn.neural_network import MLPClassifier as MLPC
# Import other models to try for feature extraction
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
import copy
X_train, X_test, y_train, y_test = train_test_split(X_normalized, y, test_size=0.15, random_state=12)
combined_features = FeatureUnion([
('pca',PCA(random_state=18)),
('kbest',SelectKBest(k=1))
])
pipeline = Pipeline([
# ('features', combined_features),
('model', MLPC(random_state=12))
])
param_grid = {
# 'features__pca__n_components':[10,20,50],
'model__solver':['adam'],
'model__hidden_layer_sizes':layer_sizes,
'model__activation':['relu'],
'model__alpha': alphas,
'model__max_iter':[200]
}
grid_search = GridSearchCV(pipeline, param_grid, n_jobs=-1)
manner_classifier = MLPC(solver='adam',random_state=3)
manner_classifier.fit(X_train, y_train['manner'])
m_score = manner_classifier.score(X_test, y_test['manner'])
place_classifier = MLPC(solver='adam',random_state=6)
place_classifier.fit(X_train, y_train['place'])
p_score = place_classifier.score(X_test, y_test['place'])
height_classifier = MLPC(solver='adam',random_state=9)
height_classifier.fit(X_train, y_train['height'])
h_score = height_classifier.score(X_test, y_test['height'])
vowel_classifier = MLPC(solver='adam',random_state=12)
vowel_classifier.fit(X_train, y_train['vowel'])
v_score = vowel_classifier.score(X_test, y_test['vowel'])
print('manner score:',m_score,'place score:',p_score,'height score:',h_score,'vowel score:',v_score)
# print(data_1_proc.head(50), trans_labels['manner'].head(50))
manner_classifier2 = copy.deepcopy(grid_search)
manner_classifier2.fit(X_train, y_train['manner'])
m_score2 = manner_classifier2.score(X_test, y_test['manner'])
print('manner score:',m_score2)
place_classifier2 = copy.deepcopy(grid_search)
place_classifier2.fit(X_train, y_train['place'])
p_score2 = place_classifier2.score(X_test, y_test['place'])
print('place score:',p_score2)
height_classifier2 = copy.deepcopy(grid_search)
height_classifier2.fit(X_train, y_train['height'])
h_score2 = height_classifier2.score(X_test, y_test['height'])
print('height score:',h_score2)
vowel_classifier2 = copy.deepcopy(grid_search)
vowel_classifier2.fit(X_train, y_train['vowel'])
v_score2 = vowel_classifier2.score(X_test, y_test['vowel'])
print('vowel score:',v_score2)
from sklearn.preprocessing import LabelEncoder as LE
from sklearn.feature_extraction import DictVectorizer as DV
from sklearn.preprocessing import MultiLabelBinarizer as MLB
from sklearn.preprocessing import OneHotEncoder as OHE
from collections import Counter
manner_inputs = manner_classifier2.predict(X_normalized)
place_inputs = place_classifier2.predict(X_normalized)
height_inputs = height_classifier2.predict(X_normalized)
vowel_inputs = vowel_classifier2.predict(X_normalized)
# We need to account for each value that each category of label can take on
m_count = Counter()
p_count = Counter()
h_count = Counter()
v_count = Counter()
for row in range(y.shape[0]):
m_count.update([y.iloc[row]['manner']])
p_count.update([y.iloc[row]['place']])
h_count.update([y.iloc[row]['height']])
v_count.update([y.iloc[row]['vowel']])
counters = [m_count,p_count,h_count,v_count]
feature_dict = {}
for count in counters:
current = 0
for feature in count.keys():
feature_dict[feature] = current
current += 1
# Then, we transform the predicted labels with one-hot encoding after
# concatenating the AF outputs and Solution Model Inputs
# Replace the gold articulatory-feature labels with the AF classifiers'
# predictions.  Whole-column assignment both avoids the chained-indexing
# pattern `df.iloc[row]['col'] = x` -- which writes to a temporary copy
# and may silently leave the frame unmodified -- and the O(n) Python loop.
raw_inputs = copy.deepcopy(y)
raw_inputs['manner'] = manner_inputs
raw_inputs['place'] = place_inputs
raw_inputs['height'] = height_inputs
raw_inputs['vowel'] = vowel_inputs
# Map every categorical feature value to its integer code from feature_dict.
num_labels = raw_inputs.copy()
for col in ('manner', 'place', 'height', 'vowel'):
    num_labels[col] = raw_inputs[col].map(feature_dict)
encoder = OHE()
new_labels = encoder.fit_transform(num_labels)
enc_labels = pandas.DataFrame(new_labels.toarray())
# Finally, we build our new input DataFrame with predicted AF's and processed EMG
# Column-concatenate the normalized EMG features with the one-hot encoded
# predicted AF labels.  pandas.concat replaces the per-row Series.append /
# DataFrame.append loop: append copied the whole frame on every call
# (O(n^2)) and was removed entirely in pandas 2.0.
phoneme_labels = y.axes[0]
# Align the encoded labels on the EMG frame's row index, matching the
# original loop's `new_row.name = X_normalized.iloc[row].name`.
enc_aligned = enc_labels.set_index(X_normalized.index)
phoneme_inputs = pandas.concat([X_normalized, enc_aligned], axis=1)
# We're ready to split our solution model data for CV
pho_X_train, pho_X_test, pho_y_train, pho_y_test = train_test_split(phoneme_inputs, phoneme_labels, test_size=0.15, random_state=12)
pho2_X_train, pho2_X_test, pho2_y_train, pho2_y_test = train_test_split(X,phoneme_labels, test_size=0.15, random_state=12)
benchmark_gs = GridSearchCV(pipeline, param_grid, n_jobs=-1)
benchmark_gs.fit(pho2_X_train, pho2_y_train)
pho2_score = benchmark_gs.score(pho2_X_test,pho2_y_test)
print(pho2_score)
# Grid of solution-model topologies: 2-9 layers, each layer uniformly
# 60 or 90 units wide.  range(60, 120, 30) never yields 0, so the
# original's `if j:` guard was dead code.
pho_layer_sizes = [(width,) * depth
                   for depth in range(2, 10)
                   for width in range(60, 120, 30)]
print('number layer sizes:', len(pho_layer_sizes), 'here be layer sizes', pho_layer_sizes)
# Next, our alpha values
pho_alphas = [0.001,0.1,1,1000]
param_grid = {
# 'features__pca__n_components':[10,20,50],
'model__solver':['adam'],
'model__hidden_layer_sizes':pho_layer_sizes,
'model__activation':['relu'],
'model__alpha': pho_alphas,
'model__max_iter':[300]
}
pho_model_grid_search = GridSearchCV(pipeline, param_grid, n_jobs=-1)
# The Solution Model
phoneme_classifier = pho_model_grid_search
phoneme_classifier.fit(pho_X_train, pho_y_train)
pho_train_f1 = phoneme_classifier.score(pho_X_train, pho_y_train)
print('phoneme classifier training score:',pho_train_f1)
pho_test_score = phoneme_classifier.score(pho_X_test, pho_y_test)
print('phoneme model test score:',pho_test_score)
phonemes = Counter(phoneme_labels)
N = len(phonemes)
total = sum(phonemes.values())
for key in phonemes:
phonemes[key] = phonemes[key] / total
print(key, "represents", str(phonemes[key]*100)+"%","of all samples")
ind = np.arange(N) # the x locations for the groups
width = .66 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, phonemes.values(), width, color='xkcd:purple')
# add some text for labels, title and axes ticks
ax.set_ylabel('Phonemes')
ax.set_title('Phoneme instances by type')
ax.set_xticks(ind)
ax.set_xticklabels(phonemes.keys(),size='xx-small')
ax.legend('Phonemes')
def autolabel(rects):
    """Attach a text label above each bar displaying its height.

    Relies on the module-level ``ax`` axes object.  The original had
    this summary as a bare, unquoted line -- a SyntaxError.
    """
    for rect in rects:
        height = rect.get_height()
        # '%d' matches the int() conversion; the original '%f' appended
        # a spurious '.000000' to every label.
        ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
                '%d' % int(height),
                ha='center', va='bottom')
autolabel(rects1)
plt.show()
# Compare orthographic word length with the phoneme count per word.
num_phonemes = []
num_letters = []
for word in labels:
    label_length = len(labels[word].axes[0])
    # `//`: slicing with a float (label_length / 3) raises TypeError on
    # Python 3.  Presumably the first third of the index holds the unique
    # phoneme list -- TODO confirm against the label layout.
    phonemes = labels[word].axes[0].values[0:label_length // 3]
    num_letters.append(len(word))
    num_phonemes.append(len(phonemes))
print('average word length:', np.mean(num_letters), '+/-', np.std(num_letters))
print('average num phonemes:', np.mean(num_phonemes), '+/-', np.std(num_phonemes))
import random
singles = [singles_1, singles_2, singles_3]
duration = 0
durations = []
dataframe_size = 0
dataframe_sizes = []
voltages = []
for single in singles:
for word in single:
length = len(single[word])
dur = single[word]['time'][length-1]
size = np.sum(single[word].memory_usage())
duration += dur
durations.append(dur)
dataframe_size += size
dataframe_sizes.append(size)
avg_v = np.mean(single[word]['voltage'])
voltages.append(avg_v)
print('total duration:', duration, "seconds")
print('standard deviation of duration:', np.std(durations))
print('total dataframe mem use:', dataframe_size)
print('standard deviation of dataframe mem usage:', np.std(dataframe_sizes))
print('average v:',np.mean(voltages), '+/-', np.std(voltages))
r_volts = []
r_durs = []
for i in range(6):
r_key = random.choice(list(singles_2))
length = len(singles_2[r_key])
dur = singles_2[r_key]['time'][length-1]
avg_v = np.mean(singles_2[r_key]['voltage'])
std_v = np.std(singles_2[r_key]['voltage'])
r_durs.append(dur)
r_volts.append(avg_v)
print(r_key, ',',avg_v,',',std_v)
from sklearn.metrics import f1_score
import math
labels_list = list(set(pho_y_test.values))
labels_list.sort()
bm_score = benchmark_gs.score(pho2_X_test,pho2_y_test)
bm_f1 = f1_score(pho2_y_test, benchmark_gs.predict(pho2_X_test),average=None, labels=labels_list)
sol_score = phoneme_classifier.score(pho_X_test, pho_y_test)
sol_f1 = f1_score(pho_y_test, phoneme_classifier.predict(pho_X_test),average=None, labels=labels_list)
print(bm_score, sol_score, len(pho2_y_test), np.std(bm_f1), np.std(sol_f1), labels)
print('benchmark f1:',bm_f1, 'std error:', np.std(bm_f1)/math.sqrt(len(pho2_y_test)))
print('solution f1:',sol_f1, 'std error:', np.std(sol_f1)/math.sqrt(len(pho_y_test)))
for label in range(len(labels_list)):
print(labels_list[label],',',bm_f1[label],',',sol_f1[label])
phoneme_classifier.best_estimator_
# Parameter Sensitivity test
sensitivity_1 = MLPC(hidden_layer_sizes=(60,60,60,60,60,60,60),alpha=.0001)
sensitivity_1.fit(pho_X_train, pho_y_train)
sens_1 = sensitivity_1.score(pho_X_test, pho_y_test)
print(sens_1)
# Row-wise forcing sensitivity test
pho_X_test_rf = copy.deepcopy(pho_X_test)
pho_X_test_rf['pc-0'] = pho_X_test_rf['pc-0'] * random.random()
rf_test = phoneme_classifier.score(pho_X_test_rf, pho_y_test)
# Column-wise forcing sensitivity test
pho_X_test_cf = copy.deepcopy(pho_X_test)
pho_X_test_cf[1:100] = pho_X_test_rf[1:100] * random.random()
cf_test = phoneme_classifier.score(pho_X_test_cf, pho_y_test)
# Random white noise sensitivity test
pho_X_test_wn = copy.deepcopy(pho_X_test)
pho_X_test_wn.iloc[:,0:10] = pho_X_test_wn.iloc[:,0:10]+np.random.normal(0, 0.15, 10)
wn_test = phoneme_classifier.score(pho_X_test_wn, pho_y_test)
print('row-wise forcing score:', rf_test)
print('column-wise forcing score:', cf_test)
print('white noise addition:', wn_test)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Writing
Step2: Copying Mode
Step3: Regular Expressions
|
<ASSISTANT_TASK:>
Python Code:
# Memory-map 'lorem.txt' read-only.  A mapped file supports both the
# file-like read() API (which advances an internal position) and
# string-like slicing (which leaves that position untouched).
import mmap

with open('lorem.txt', 'r') as f:
    with mmap.mmap(f.fileno(), 0,
                   access=mmap.ACCESS_READ) as m:
        print('First 10 bytes via read :', m.read(10))
        # Slicing does not move the read position ...
        print('First 10 bytes via slice:', m[:10])
        # ... so this read() continues from byte 10, not byte 20.
        print('2nd 10 bytes via read :', m.read(10))
import mmap
import shutil
# Copy the example file
shutil.copyfile('lorem.txt', 'lorem_copy.txt')
word = b'consectetuer'
reversed = word[::-1]
print('Looking for :', word)
print('Replacing with :', reversed)
with open('lorem_copy.txt', 'r+') as f:
with mmap.mmap(f.fileno(), 0) as m:
print('Before:\n{}'.format(m.readline().rstrip()))
m.seek(0) # rewind
loc = m.find(word)
m[loc:loc + len(word)] = reversed
m.flush()
m.seek(0) # rewind
print('After :\n{}'.format(m.readline().rstrip()))
f.seek(0) # rewind
print('File :\n{}'.format(f.readline().rstrip()))
# import mmap
import shutil
# Copy the example file
shutil.copyfile('lorem.txt', 'lorem_copy.txt')
word = b'consectetuer'
reversed = word[::-1]
print('Looking for :', word)
print('Replacing with :', reversed)
with open('lorem_copy.txt', 'r+') as f:
with mmap.mmap(f.fileno(), 0) as m:
print('Before:\n{}'.format(m.readline().rstrip()))
m.seek(0) # rewind
loc = m.find(word)
m[loc:loc + len(word)] = reversed
m.flush()
m.seek(0) # rewind
print('After :\n{}'.format(m.readline().rstrip()))
f.seek(0) # rewind
print('File :\n{}'.format(f.readline().rstrip()))
import mmap
import shutil
# Copy the example file
shutil.copyfile('lorem.txt', 'lorem_copy.txt')
word = b'consectetuer'
reversed = word[::-1]
with open('lorem_copy.txt', 'r+') as f:
with mmap.mmap(f.fileno(), 0,
access=mmap.ACCESS_COPY) as m:
print('Memory Before:\n{}'.format(
m.readline().rstrip()))
print('File Before :\n{}\n'.format(
f.readline().rstrip()))
m.seek(0) # rewind
loc = m.find(word)
m[loc:loc + len(word)] = reversed
m.seek(0) # rewind
print('Memory After :\n{}'.format(
m.readline().rstrip()))
f.seek(0)
print('File After :\n{}'.format(
f.readline().rstrip()))
import mmap
import re
pattern = re.compile(rb'(\.\W+)?([^.]?nulla[^.]*?\.)',
re.DOTALL | re.IGNORECASE | re.MULTILINE)
with open('lorem.txt', 'r') as f:
with mmap.mmap(f.fileno(), 0,
access=mmap.ACCESS_READ) as m:
for match in pattern.findall(m):
print(match[1].replace(b'\n', b' '))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Description
Step2: SOLUTION
Step3: $$Z_B = \frac{(R_2/(2-s) + jX_2)(jX_M)}{R_2/(2-s) + jX_2 + jX_M}$$
Step4: (a)
Step5: (b)
Step6: (c)
Step7: (d)
Step8: (e)
Step9: (f)
Step10: (g)
Step11: (h)
|
<ASSISTANT_TASK:>
Python Code:
%pylab notebook
%precision %.4g
V = 120 # [V]
p = 4
R1 = 2.0 # [Ohm]
R2 = 2.8 # [Ohm]
X1 = 2.56 # [Ohm]
X2 = 2.56 # [Ohm]
Xm = 60.5 # [Ohm]
s = 0.025
Prot = 51 # [W]
Zf = ((R2/s + X2*1j)*(Xm*1j)) / (R2/s + X2*1j + Xm*1j)
Zf
Zb = ((R2/(2-s) + X2*1j)*(Xm*1j)) / (R2/(2-s) + X2*1j + Xm*1j)
Zb
# Stator current from the single-phase equivalent circuit:
# I1 = V / (R1 + jX1 + Zf/2 + Zb/2)
I1 = V / (R1 + X1*1j + 0.5*Zf + 0.5*Zb)
# numpy.angle handles all four quadrants, unlike arctan(imag/real).
I1_angle = angle(I1)
# Unit fix: I1 is a current, so report amperes rather than volts.
print('I1 = {:.3f} A ∠{:.1f}°'.format(abs(I1), I1_angle/pi*180))
Pin = V*abs(I1)*cos(I1_angle)
print('''
Pin = {:.1f} W
============='''.format(Pin))
Pag_f = abs(I1)**2*0.5*Zf.real
Pag_f
Pag_b = abs(I1)**2*0.5*Zb.real
Pag_b
Pag = Pag_f - Pag_b
print('''
Pag = {:.1f} W
============='''.format(Pag))
Pconv_f = (1-s)*Pag_f
Pconv_f
Pconv_b = (1-s)*Pag_b
Pconv_b
Pconv = Pconv_f - Pconv_b
print('''
Pconv = {:.1f} W
==============='''.format(Pconv))
Pout = Pconv - Prot
print('''
Pout = {:.1f} W
=============='''.format(Pout))
n_sync = 1800.0 # [r/min]
w_sync = n_sync * (2.0*pi/1.0) * (1.0/60.0)
tau_ind = Pag / w_sync
print('''
τ_ind = {:.3f} Nm
================'''.format(tau_ind))
w_m = (1-s)*w_sync
tau_load = Pout / w_m
print('''
τ_load = {:.3f} Nm
================='''.format(tau_load))
eta = Pout/Pin
print('''
η = {:.1f} %
=========='''.format(eta*100))
PF = cos(I1_angle)
print('''
PF = {:.3f} lagging
=================='''.format(PF))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: Notebook Extensions -- qgrid
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import absolute_import, division, print_function
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('poster')
# sns.set_style('whitegrid')
sns.set_style('darkgrid')
plt.rcParams['figure.figsize'] = 12, 8 # plotsize
import numpy as np
import pandas as pd
from pandas.tools.plotting import scatter_matrix
from sklearn.datasets import load_boston
df = pd.read_csv("../data/coal_prod_cleaned.csv")
!conda install qgrid -y
# Check out http://nbviewer.ipython.org/github/quantopian/qgrid/blob/master/qgrid_demo.ipynb for more (including demo)
import qgrid # Put imports at the top
qgrid.nbinstall(overwrite=True)
df.head()
qgrid.show_grid(df[['MSHA_ID', 'Year', 'Mine_Name', 'Mine_State', 'Mine_County']], remote_js=True)
%matplotlib inline
%matplotlib notebook
sns.set_context('poster')
sns.set_style('darkgrid')
plt.rcParams['figure.figsize'] = 12, 8 # plotsize
import mpld3
mpld3.enable_notebook()
mpld3.disable_notebook()
plt.scatter(df.Average_Employees,
df.Labor_Hours)
plt.xlabel("Number of Employees")
plt.ylabel("Total Hours Worked");
colors = sns.color_palette(n_colors=5)
color_dict = {key: value for key, value in zip(sorted(df.Year.unique()), colors)}
color_dict
for year in sorted(df.Year.unique()[[0, 2, -1]]):
plt.scatter(df[df.Year == year].Labor_Hours,
df[df.Year == year].Production_short_tons,
c=color_dict[year],
s=50,
label=year,
)
plt.xlabel("Total Hours Worked")
plt.ylabel("Total Amount Produced")
plt.legend()
plt.savefig("ex1.png")
import matplotlib as mpl
mpl.style.use('bmh')
plt.style.available
for year in sorted(df.Year.unique()[[0, 2, -1]]):
plt.scatter(df[df.Year == year].Labor_Hours,
df[df.Year == year].Production_short_tons,
c=color_dict[year],
s=50,
label=year,
)
plt.xlabel("Total Hours Worked")
plt.ylabel("Total Amount Produced")
plt.legend()
# plt.savefig("ex1.png")
df_dict = load_boston()
features = pd.DataFrame(data=df_dict.data, columns = df_dict.feature_names)
target = pd.DataFrame(data=df_dict.target, columns = ['MEDV'])
df = pd.concat([features, target], axis=1)
df.head()
# Target variable
fig, ax = plt.subplots(figsize=(10,8))
sns.distplot(df.MEDV, ax=ax, rug=True, hist=False)
fig, ax = plt.subplots(figsize=(10,7))
sns.kdeplot(df.LSTAT,
df.MEDV,
ax=ax)
fig, ax = plt.subplots(figsize=(10, 10))
scatter_matrix(df[['MEDV', 'LSTAT', 'CRIM', 'RM', 'NOX', 'DIS']], alpha=0.2, diagonal='hist', ax=ax);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For complex model, we can break it down into elementary reactions, for example, the following model
Step2: Setup geometry
Step3: Create a random number generator
Step4: Exercise 2
Step5: Create and initialize a solver
Step6: Run the solver and gather simulation data
Step7: In practice, it is often necessary to store simulation data in a numpy array or a file for plotting or further analysis. For example, here we record the number of molcules using numpy array.
Step8: Let's check what is inside the array now
Step9: Exercise 3
Step10: Visualize simulation data
Step11: Exercise 4
Step12: Here is the complete script for our well-mixed kinase simulation
Step13: From well-mixed simulation to spatial simulation
Step14: We now import a tetrahedral mesh using the steps.utilities.meshio module to replace the well-mixed geometry
Step15: Finally, we replace the "Wmdirect" solver with the spatial "Tetexact" solver
Step16: The "Wmdirect" solver and the "Tetexact" solver share most of the APIs, so we can reuse our old script for simulation control and plotting
Step17: Exercise 5
Step18: Here is the modified script
|
<ASSISTANT_TASK:>
Python Code:
# Import biochemical model module
import steps.model as smod
# Create model container
mdl = smod.Model()
# Create chemical species
A = smod.Spec('A', mdl)
B = smod.Spec('B', mdl)
C = smod.Spec('C', mdl)
# Create reaction set container
vsys = smod.Volsys('vsys', mdl)
# Create reaction
# A + B - > C with rate 200 /uM.s
reac_f = smod.Reac('reac_f', vsys, lhs=[A,B], rhs = [C])
reac_f.setKcst(200e6)
# Import biochemical model module
import steps.model as smod
# Create model container
execise_mdl = smod.Model()
# Create chemical species
MEKp = smod.Spec('MEKp', execise_mdl)
ERK = smod.Spec('ERK', execise_mdl)
MEKpERK = smod.Spec('MEKpERK', execise_mdl)
ERKp = smod.Spec('ERKp', execise_mdl)
# Create reaction set container (volume system)
execise_vsys = smod.Volsys('execise_vsys', execise_mdl)
# Create reactions (Do it yourself)
# MEKp + ERK -> MEKpERK, rate constant 16.2*10e6
# MEKpERK -> MEKp + ERK, rate constant 0.6
# MEKpERK -> MEKp + ERKp, rate constant 0.15
# Import geometry module
import steps.geom as sgeom
# Create well-mixed geometry container
wmgeom = sgeom.Geom()
# Create cytosol compartment
cyt = sgeom.Comp('cyt', wmgeom)
# Give volume to cyt (1um^3)
cyt.setVol(1.0e-18)
# Assign reaction set to compartment
cyt.addVolsys('vsys')
# Import random number generator module
import steps.rng as srng
# Create random number generator, with buffer size as 256
r = srng.create('mt19937', 256)
# Initialise with some seed
r.initialize(899)
# Could use time to get random seed
#import time
#r.initialize(int(time.time()))
# Import biochemical model module
import steps.model as smod
# Create model container
execise_mdl = smod.Model()
# Create chemical species
MEKp = smod.Spec('MEKp', execise_mdl)
ERK = smod.Spec('ERK', execise_mdl)
MEKpERK = smod.Spec('MEKpERK', execise_mdl)
ERKp = smod.Spec('ERKp', execise_mdl)
# Create reaction set container (volume system)
execise_vsys = smod.Volsys('execise_vsys', execise_mdl)
# Create reactions (Do it yourself)
# MEKp + ERK -> MEKpERK, rate constant 16.2*10e6
MEKp_ERK_to_MEKpERK = smod.Reac('MEKp_ERK_to_MEKpERK', execise_vsys, lhs=[MEKp,ERK], rhs = [MEKpERK])
MEKp_ERK_to_MEKpERK.setKcst(16.2e6)
# MEKpERK -> MEKp + ERK, rate constant 0.6
MEKpERK_to_MEKp_ERK = smod.Reac('MEKpERK_to_MEKp_ERK', execise_vsys, lhs = [MEKpERK], rhs=[MEKp,ERK])
MEKpERK_to_MEKp_ERK.setKcst(0.6)
# MEKpERK -> MEKp + ERKp, rate constant 0.15
MEKpERK_to_MEKp_ERKp = smod.Reac('MEKpERK_to_MEKp_ERKp', execise_vsys, lhs = [MEKpERK], rhs=[MEKp,ERKp])
MEKpERK_to_MEKp_ERKp.setKcst(0.15)
####### You script after execise 1 should look like above #######
# Create a compartment of 0.1um^3
# Associate the compartment with the volume system 'vsys'
# Create and initialize a 'r123' random number generator
# Import solver module
import steps.solver as ssolv
# Create Well-mixed Direct solver
sim_direct = ssolv.Wmdirect(mdl, wmgeom, r)
# Inject 10 ‘A’ molecules
sim_direct.setCompCount('cyt','A', 10)
# Set concentration of ‘B’ molecules
sim_direct.setCompConc('cyt', 'B', 0.0332e-6)
# Run simulation for 0.1s
sim_direct.run(0.1)
# Return the number of A molecules
sim_direct.getCompCount('cyt', 'A')
# Reset the solver and reinitizlize molecule counts
sim_direct.reset()
# Inject 10 ‘A’ molecules
sim_direct.setCompCount('cyt','A', 10)
# Set concentration of ‘B’ molecules
sim_direct.setCompConc('cyt', 'B', 0.0332e-6)
# Import numpy
import numpy as np
# Create time-point numpy array, starting at time 0, end at 0.5 second and record data every 0.001 second
tpnt = np.arange(0.0, 0.501, 0.001)
# Calculate number of time points
n_tpnts = len(tpnt)
# Create data array, initialised with zeros
res_direct = np.zeros([n_tpnts, 3])
# Run simulation and record data
for t in range(0, n_tpnts):
sim_direct.run(tpnt[t])
res_direct[t,0] = sim_direct.getCompCount('cyt','A')
res_direct[t,1] = sim_direct.getCompCount('cyt','B')
res_direct[t,2] = sim_direct.getCompCount('cyt','C')
print(res_direct)
# Import biochemical model module
import steps.model as smod
# Create model container
execise_mdl = smod.Model()
# Create chemical species
MEKp = smod.Spec('MEKp', execise_mdl)
ERK = smod.Spec('ERK', execise_mdl)
MEKpERK = smod.Spec('MEKpERK', execise_mdl)
ERKp = smod.Spec('ERKp', execise_mdl)
# Create reaction set container (volume system)
execise_vsys = smod.Volsys('execise_vsys', execise_mdl)
# Create reactions (Do it yourself)
# MEKp + ERK -> MEKpERK, rate constant 16.2*10e6
MEKp_ERK_to_MEKpERK = smod.Reac('MEKp_ERK_to_MEKpERK', execise_vsys, lhs=[MEKp,ERK], rhs = [MEKpERK])
MEKp_ERK_to_MEKpERK.setKcst(16.2e6)
# MEKpERK -> MEKp + ERK, rate constant 0.6
MEKpERK_to_MEKp_ERK = smod.Reac('MEKpERK_to_MEKp_ERK', execise_vsys, lhs = [MEKpERK], rhs=[MEKp,ERK])
MEKpERK_to_MEKp_ERK.setKcst(0.6)
# MEKpERK -> MEKp + ERKp, rate constant 0.15
MEKpERK_to_MEKp_ERKp = smod.Reac('MEKpERK_to_MEKp_ERKp', execise_vsys, lhs = [MEKpERK], rhs=[MEKp,ERKp])
MEKpERK_to_MEKp_ERKp.setKcst(0.15)
####### You script after execise 1 should look like above #######
# Create a compartment of 0.1um^3
import steps.geom as sgeom
execise_wmgeom = sgeom.Geom()
execise_cyt = sgeom.Comp('execise_cyt', execise_wmgeom)
execise_cyt.setVol(0.1e-18)
# Associate the compartment with the volume system 'vsys'
execise_cyt.addVolsys('execise_vsys')
# Create and initialize a 'r123' random number generator
import steps.rng as srng
execise_r = srng.create('r123', 256)
execise_r.initialize(1)
####### You script after execise 2 should look like above #######
# Create a "wmdirect" solver and set the initial condition:
# MEKp = 1uM
# ERK = 1.5uM
# Run the simulation for 30 seconds, record concerntrations of each molecule every 0.01 seconds.
from pylab import *
%matplotlib inline
plot(tpnt, res_direct[:,0], label='A')
plot(tpnt, res_direct[:,1], label='B')
plot(tpnt, res_direct[:,2], label='C')
ylabel('Number of molecules')
xlabel('Time(sec)')
legend()
show()
# Import biochemical model module
import steps.model as smod
# Create model container
execise_mdl = smod.Model()
# Create chemical species
MEKp = smod.Spec('MEKp', execise_mdl)
ERK = smod.Spec('ERK', execise_mdl)
MEKpERK = smod.Spec('MEKpERK', execise_mdl)
ERKp = smod.Spec('ERKp', execise_mdl)
# Create reaction set container (volume system)
execise_vsys = smod.Volsys('execise_vsys', execise_mdl)
# Create reactions (Do it yourself)
# MEKp + ERK -> MEKpERK, rate constant 16.2*10e6
MEKp_ERK_to_MEKpERK = smod.Reac('MEKp_ERK_to_MEKpERK', execise_vsys, lhs=[MEKp,ERK], rhs = [MEKpERK])
MEKp_ERK_to_MEKpERK.setKcst(16.2e6)
# MEKpERK -> MEKp + ERK, rate constant 0.6
MEKpERK_to_MEKp_ERK = smod.Reac('MEKpERK_to_MEKp_ERK', execise_vsys, lhs = [MEKpERK], rhs=[MEKp,ERK])
MEKpERK_to_MEKp_ERK.setKcst(0.6)
# MEKpERK -> MEKp + ERKp, rate constant 0.15
MEKpERK_to_MEKp_ERKp = smod.Reac('MEKpERK_to_MEKp_ERKp', execise_vsys, lhs = [MEKpERK], rhs=[MEKp,ERKp])
MEKpERK_to_MEKp_ERKp.setKcst(0.15)
####### You script after execise 1 should look like above #######
# Create a compartment of 0.1um^3
import steps.geom as sgeom
execise_wmgeom = sgeom.Geom()
execise_cyt = sgeom.Comp('execise_cyt', execise_wmgeom)
execise_cyt.setVol(0.1e-18)
# Associate the compartment with the volume system 'vsys'
execise_cyt.addVolsys('execise_vsys')
# Create and initialize a 'r123' random number generator
import steps.rng as srng
execise_r = srng.create('r123', 256)
execise_r.initialize(143)
####### You script after execise 2 should look like above #######
# Create a "wmdirect" solver and set the initial condition:
# MEKp = 1uM
# ERK = 1.5uM
import steps.solver as ssolv
execise_sim = ssolv.Wmdirect(execise_mdl, execise_wmgeom, execise_r)
execise_sim.setCompConc('execise_cyt','MEKp', 1e-6)
execise_sim.setCompConc('execise_cyt','ERK', 1.5e-6)
# Run the simulation for 30 seconds, record concerntrations of each molecule every 0.01 seconds.
import numpy as np
execise_tpnts = np.arange(0.0, 30.01, 0.01)
n_tpnts = len(execise_tpnts)
execise_res = np.zeros([n_tpnts, 4])
# Run simulation and record data
for t in range(0, n_tpnts):
execise_sim.run(execise_tpnts[t])
execise_res[t,0] = execise_sim.getCompCount('execise_cyt','MEKp')
execise_res[t,1] = execise_sim.getCompCount('execise_cyt','ERK')
execise_res[t,2] = execise_sim.getCompCount('execise_cyt','MEKpERK')
execise_res[t,3] = execise_sim.getCompCount('execise_cyt','ERKp')
####### You script after execise 3 should look like above #######
# Plot execise_res
# Import biochemical model module
import steps.model as smod
# Create model container
execise_mdl = smod.Model()
# Create chemical species
MEKp = smod.Spec('MEKp', execise_mdl)
ERK = smod.Spec('ERK', execise_mdl)
MEKpERK = smod.Spec('MEKpERK', execise_mdl)
ERKp = smod.Spec('ERKp', execise_mdl)
# Create reaction set container (volume system)
execise_vsys = smod.Volsys('execise_vsys', execise_mdl)
# Create reactions (Do it yourself)
# MEKp + ERK -> MEKpERK, rate constant 16.2*10e6
MEKp_ERK_to_MEKpERK = smod.Reac('MEKp_ERK_to_MEKpERK', execise_vsys, lhs=[MEKp,ERK], rhs = [MEKpERK])
MEKp_ERK_to_MEKpERK.setKcst(16.2e6)
# MEKpERK -> MEKp + ERK, rate constant 0.6
MEKpERK_to_MEKp_ERK = smod.Reac('MEKpERK_to_MEKp_ERK', execise_vsys, lhs = [MEKpERK], rhs=[MEKp,ERK])
MEKpERK_to_MEKp_ERK.setKcst(0.6)
# MEKpERK -> MEKp + ERKp, rate constant 0.15
MEKpERK_to_MEKp_ERKp = smod.Reac('MEKpERK_to_MEKp_ERKp', execise_vsys, lhs = [MEKpERK], rhs=[MEKp,ERKp])
MEKpERK_to_MEKp_ERKp.setKcst(0.15)
####### You script after execise 1 should look like above #######
# Create a compartment of 0.1um^3
import steps.geom as sgeom
execise_wmgeom = sgeom.Geom()
execise_cyt = sgeom.Comp('execise_cyt', execise_wmgeom)
execise_cyt.setVol(0.1e-18)
# Associate the compartment with the volume system 'vsys'
execise_cyt.addVolsys('execise_vsys')
# Create and initialize a 'r123' random number generator
import steps.rng as srng
execise_r = srng.create('r123', 256)
execise_r.initialize(143)
####### You script after execise 2 should look like above #######
# Create a "wmdirect" solver and set the initial condition:
# MEKp = 1uM
# ERK = 1.5uM
import steps.solver as ssolv
execise_sim = ssolv.Wmdirect(execise_mdl, execise_wmgeom, execise_r)
execise_sim.setCompConc('execise_cyt','MEKp', 1e-6)
execise_sim.setCompConc('execise_cyt','ERK', 1.5e-6)
# Run the simulation for 30 seconds, record concerntrations of each molecule every 0.01 seconds.
import numpy as np
execise_tpnts = np.arange(0.0, 30.01, 0.01)
n_tpnts = len(execise_tpnts)
execise_res = np.zeros([n_tpnts, 4])
# Run simulation and record data
for t in range(0, n_tpnts):
execise_sim.run(execise_tpnts[t])
execise_res[t,0] = execise_sim.getCompCount('execise_cyt','MEKp')
execise_res[t,1] = execise_sim.getCompCount('execise_cyt','ERK')
execise_res[t,2] = execise_sim.getCompCount('execise_cyt','MEKpERK')
execise_res[t,3] = execise_sim.getCompCount('execise_cyt','ERKp')
####### You script after execise 3 should look like above #######
# Plot execise_res
from pylab import *
plot(execise_tpnts, execise_res[:,0], label='MEKp')
plot(execise_tpnts, execise_res[:,1], label='ERK')
plot(execise_tpnts, execise_res[:,2], label='MEKpERK')
plot(execise_tpnts, execise_res[:,3], label='ERKp')
ylabel('Number of molecules')
xlabel('Time(sec)')
legend()
show()
####### You script after execise 4 should look like above #######
# Import biochemical model module
import steps.model as smod
# Create model container
mdl = smod.Model()
# Create chemical species
A = smod.Spec('A', mdl)
B = smod.Spec('B', mdl)
C = smod.Spec('C', mdl)
# Create reaction set container
vsys = smod.Volsys('vsys', mdl)
# Create reaction
# A + B - > C with rate 200 /uM.s
reac_f = smod.Reac('reac_f', vsys, lhs=[A,B], rhs = [C])
reac_f.setKcst(200e6)
###### Above is the previous well-mixed biochemical model
# We add diffusion rules for species A, B and C
diff_a = smod.Diff('diff_a', vsys, A)
diff_a.setDcst(0.02e-9)
diff_b = smod.Diff('diff_b', vsys, B)
diff_b.setDcst(0.02e-9)
diff_c = smod.Diff('diff_c', vsys, C)
diff_c.setDcst(0.02e-9)
'''
# Import geometry module
import steps.geom as sgeom
# Create well-mixed geometry container
wmgeom = sgeom.Geom()
# Create cytosol compartment
cyt = sgeom.Comp('cyt', wmgeom)
# Give volume to cyt (1um^3)
cyt.setVol(1.0e-18)
# Assign reaction set to compartment
cyt.addVolsys('vsys')
'''
##### above is the old well-mixed geometry ##########
import steps.geom as sgeom
import steps.utilities.meshio as meshio
# Import the mesh
mesh = meshio.importAbaqus('meshes/1x1x1_cube.inp', 1.0e-6)[0]
# Create mesh-based compartment
cyt = sgeom.TmComp('cyt', mesh, range(mesh.ntets))
# Add volume system to the compartment
cyt.addVolsys('vsys')
# Import solver module
import steps.solver as ssolv
'''
# Create Well-mixed Direct solver
sim_direct = ssolv.Wmdirect(mdl, wmgeom, r)
'''
##### above is the old well-mixed Wmdirect solver ##########
# Create a spatial Tetexact solver
sim_tetexact = ssolv.Tetexact(mdl, mesh, r)
# Inject 10 ‘A’ molecules
sim_tetexact.setCompCount('cyt','A', 10)
# Set concentration of ‘B’ molecules
sim_tetexact.setCompConc('cyt', 'B', 0.0332e-6)
# Import numpy
import numpy as np
# Create time-point numpy array, starting at time 0, end at 0.5 second and record data every 0.001 second
tpnt = np.arange(0.0, 0.501, 0.001)
# Calculate number of time points
n_tpnts = len(tpnt)
# Create data array, initialised with zeros
res_tetexact = np.zeros([n_tpnts, 3])
# Run simulation and record data
for t in range(0, n_tpnts):
sim_tetexact.run(tpnt[t])
res_tetexact[t,0] = sim_tetexact.getCompCount('cyt','A')
res_tetexact[t,1] = sim_tetexact.getCompCount('cyt','B')
res_tetexact[t,2] = sim_tetexact.getCompCount('cyt','C')
from pylab import *
plot(tpnt, res_tetexact[:,0], label='A')
plot(tpnt, res_tetexact[:,1], label='B')
plot(tpnt, res_tetexact[:,2], label='C')
ylabel('Number of molecules')
xlabel('Time(sec)')
legend()
show()
# Exercise: well-mixed (non-spatial) MEK/ERK phosphorylation model in STEPS.
# This is the starting point; the spatial (mesh-based) solution follows below.
# Import biochemical model module
import steps.model as smod
# Create model container
execise_mdl = smod.Model()
# Create chemical species
MEKp = smod.Spec('MEKp', execise_mdl)
ERK = smod.Spec('ERK', execise_mdl)
MEKpERK = smod.Spec('MEKpERK', execise_mdl)
ERKp = smod.Spec('ERKp', execise_mdl)
# Create reaction set container (volume system)
execise_vsys = smod.Volsys('execise_vsys', execise_mdl)
# Create reactions (Do it yourself)
# Binding: MEKp + ERK -> MEKpERK, rate constant 16.2e6
MEKp_ERK_to_MEKpERK = smod.Reac('MEKp_ERK_to_MEKpERK', execise_vsys, lhs=[MEKp,ERK], rhs = [MEKpERK])
MEKp_ERK_to_MEKpERK.setKcst(16.2e6)
# Unbinding: MEKpERK -> MEKp + ERK, rate constant 0.6
MEKpERK_to_MEKp_ERK = smod.Reac('MEKpERK_to_MEKp_ERK', execise_vsys, lhs = [MEKpERK], rhs=[MEKp,ERK])
MEKpERK_to_MEKp_ERK.setKcst(0.6)
# Catalysis: MEKpERK -> MEKp + ERKp, rate constant 0.15
MEKpERK_to_MEKp_ERKp = smod.Reac('MEKpERK_to_MEKp_ERKp', execise_vsys, lhs = [MEKpERK], rhs=[MEKp,ERKp])
MEKpERK_to_MEKp_ERKp.setKcst(0.15)
########### exercise 5.1: Add diffusion constants
# * MEKp = 30e-12 m^2/s
# * ERK = 30e-12 m^2/s
# * MEKpERK = 10e-12 m^2/s
####### Your script after exercise 1 should look like the above #######
########### exercise 5.2: Replace the geometry to use mesh 'meshes/sp_0.1v_1046.inp'
# Create a compartment of 0.1um^3
import steps.geom as sgeom
execise_wmgeom = sgeom.Geom()
execise_cyt = sgeom.Comp('execise_cyt', execise_wmgeom)
execise_cyt.setVol(0.1e-18)
# Associate the compartment with the volume system 'vsys'
execise_cyt.addVolsys('execise_vsys')
# Create and initialize a 'r123' random number generator
# (fixed seed 143 makes the stochastic run reproducible)
import steps.rng as srng
execise_r = srng.create('r123', 256)
execise_r.initialize(143)
####### Your script after exercise 2 should look like the above #######
# Create a "wmdirect" solver and set the initial condition:
# MEKp = 1uM
# ERK = 1.5uM
import steps.solver as ssolv
########### exercise 5.3: Change the solver to Tetexact
execise_sim = ssolv.Wmdirect(execise_mdl, execise_wmgeom, execise_r)
execise_sim.setCompConc('execise_cyt','MEKp', 1e-6)
execise_sim.setCompConc('execise_cyt','ERK', 1.5e-6)
# Run the simulation for 30 seconds, record concentrations of each molecule every 0.01 seconds.
import numpy as np
execise_tpnts = np.arange(0.0, 30.01, 0.01)
n_tpnts = len(execise_tpnts)
# One row per time point; columns: MEKp, ERK, MEKpERK, ERKp molecule counts.
execise_res = np.zeros([n_tpnts, 4])
# Run simulation and record data
for t in range(0, n_tpnts):
    execise_sim.run(execise_tpnts[t])
    execise_res[t,0] = execise_sim.getCompCount('execise_cyt','MEKp')
    execise_res[t,1] = execise_sim.getCompCount('execise_cyt','ERK')
    execise_res[t,2] = execise_sim.getCompCount('execise_cyt','MEKpERK')
    execise_res[t,3] = execise_sim.getCompCount('execise_cyt','ERKp')
####### Your script after exercise 3 should look like the above #######
# Plot execise_res
from pylab import *
plot(execise_tpnts, execise_res[:,0], label='MEKp')
plot(execise_tpnts, execise_res[:,1], label='ERK')
plot(execise_tpnts, execise_res[:,2], label='MEKpERK')
plot(execise_tpnts, execise_res[:,3], label='ERKp')
ylabel('Number of molecules')
xlabel('Time(sec)')
legend()
show()
####### Your script after exercise 4 should look like the above #######
# Solution: spatial (tetrahedral-mesh) version of the MEK/ERK model.
# Differences from the well-mixed script above: diffusion rules (5.1),
# a TmComp on an imported Abaqus mesh (5.2), and the Tetexact solver (5.3).
# Import biochemical model module
import steps.model as smod
# Create model container
execise_mdl = smod.Model()
# Create chemical species
MEKp = smod.Spec('MEKp', execise_mdl)
ERK = smod.Spec('ERK', execise_mdl)
MEKpERK = smod.Spec('MEKpERK', execise_mdl)
ERKp = smod.Spec('ERKp', execise_mdl)
# Create reaction set container (volume system)
execise_vsys = smod.Volsys('execise_vsys', execise_mdl)
# Create reactions (Do it yourself)
# Binding: MEKp + ERK -> MEKpERK, rate constant 16.2e6
MEKp_ERK_to_MEKpERK = smod.Reac('MEKp_ERK_to_MEKpERK', execise_vsys, lhs=[MEKp,ERK], rhs = [MEKpERK])
MEKp_ERK_to_MEKpERK.setKcst(16.2e6)
# Unbinding: MEKpERK -> MEKp + ERK, rate constant 0.6
MEKpERK_to_MEKp_ERK = smod.Reac('MEKpERK_to_MEKp_ERK', execise_vsys, lhs = [MEKpERK], rhs=[MEKp,ERK])
MEKpERK_to_MEKp_ERK.setKcst(0.6)
# Catalysis: MEKpERK -> MEKp + ERKp, rate constant 0.15
MEKpERK_to_MEKp_ERKp = smod.Reac('MEKpERK_to_MEKp_ERKp', execise_vsys, lhs = [MEKpERK], rhs=[MEKp,ERKp])
MEKpERK_to_MEKp_ERKp.setKcst(0.15)
########### exercise 5.1: Add diffusion constants
# * MEKp = 30e-12 m^2/s
# * ERK = 30e-12 m^2/s
# * MEKpERK = 10e-12 m^2/s
diff_MEKp = smod.Diff('diff_MEKp', execise_vsys, MEKp)
diff_MEKp.setDcst(30e-12)
diff_ERK = smod.Diff('diff_ERK', execise_vsys, ERK)
diff_ERK.setDcst(30e-12)
diff_MEKpERK = smod.Diff('diff_MEKpERK', execise_vsys, MEKpERK)
diff_MEKpERK.setDcst(10e-12)
####### Your script after exercise 1 should look like the above #######
########### exercise 5.2: Replace the geometry to use mesh 'meshes/sp_0.1v_1046.inp'
import steps.geom as sgeom
import steps.utilities.meshio as meshio
# Import the Abaqus mesh (scale factor 1e-6: coordinates given in micrometres)
mesh = meshio.importAbaqus('meshes/sp_0.1v_1046.inp', 1.0e-6)[0]
# The compartment spans every tetrahedron of the mesh
execise_cyt = sgeom.TmComp('execise_cyt', mesh, range(mesh.ntets))
execise_cyt.addVolsys('execise_vsys')
# Create and initialize a 'r123' random number generator
# (fixed seed 143 makes the stochastic run reproducible)
import steps.rng as srng
execise_r = srng.create('r123', 256)
execise_r.initialize(143)
####### Your script after exercise 2 should look like the above #######
# Create a "wmdirect" solver and set the initial condition:
# MEKp = 1uM
# ERK = 1.5uM
import steps.solver as ssolv
########### exercise 5.3: Change the solver to Tetexact
execise_sim = ssolv.Tetexact(execise_mdl, mesh, execise_r)
execise_sim.setCompConc('execise_cyt','MEKp', 1e-6)
execise_sim.setCompConc('execise_cyt','ERK', 1.5e-6)
# Run the simulation for 30 seconds, record concentrations of each molecule every 0.01 seconds.
import numpy as np
execise_tpnts = np.arange(0.0, 30.01, 0.01)
n_tpnts = len(execise_tpnts)
# One row per time point; columns: MEKp, ERK, MEKpERK, ERKp molecule counts.
execise_res = np.zeros([n_tpnts, 4])
# Run simulation and record data
for t in range(0, n_tpnts):
    execise_sim.run(execise_tpnts[t])
    execise_res[t,0] = execise_sim.getCompCount('execise_cyt','MEKp')
    execise_res[t,1] = execise_sim.getCompCount('execise_cyt','ERK')
    execise_res[t,2] = execise_sim.getCompCount('execise_cyt','MEKpERK')
    execise_res[t,3] = execise_sim.getCompCount('execise_cyt','ERKp')
####### Your script after exercise 3 should look like the above #######
# Plot execise_res
from pylab import *
plot(execise_tpnts, execise_res[:,0], label='MEKp')
plot(execise_tpnts, execise_res[:,1], label='ERK')
plot(execise_tpnts, execise_res[:,2], label='MEKpERK')
plot(execise_tpnts, execise_res[:,3], label='ERKp')
ylabel('Number of molecules')
xlabel('Time(sec)')
legend()
show()
####### Your script after exercise 4 should look like the above #######
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: No código acima, fizemos a leitura do arquivo informando que não há cabeçalho (obrigatório) e embaralhamos os dados.
Step2: Um problema é que nossos atributos categóricos são strings, e a implementção de Decision Tree do scikit-learn só aceita atributos numéricos. Precisamos converter os atributos.
Step3: Faremos a separação dos dados em conjunto de treino e teste
Step4: Preparação de dados ok!
|
<ASSISTANT_TASK:>
Python Code:
import os
import pandas as pd
import math
import numpy as np
from sklearn.tree import DecisionTreeClassifier

# Column names for the UCI "car evaluation" dataset; the last column is the label.
headers = ["buying", "maint", "doors", "persons","lug_boot", "safety", "class"]
# The 6 feature columns the tree is actually trained on (everything but "class").
feature_names = headers[:-1]
data = pd.read_csv("car_data.csv", header=None, names=headers)
data = data.sample(frac=1).reset_index(drop=True) # shuffle
data.head()
data.dtypes
# scikit-learn's DecisionTreeClassifier requires numeric inputs, so encode every
# string column as integer category codes.
for h in headers:
    data[h] = data[h].astype('category')
    data[h] = data[h].cat.codes
# Keep the label as the index so the remaining columns are exactly the features.
data.set_index("class", inplace=True)
data.head()
# 70/30 train/test split (the data was already shuffled above).
size = len(data)
train_size = int(math.floor(size * 0.7))
train_data = data[:train_size]
test_data = data[train_size:]
d_tree = DecisionTreeClassifier(criterion="gini")
d_tree.fit(train_data, train_data.index)
d_tree.predict(test_data.iloc[:, 0:6])
d_tree.score(test_data, test_data.index)
# desenha a arvore (render the tree)
import graphviz
from sklearn import tree
# Fix: the tree was trained on the 6 feature columns only, so pass the 6 feature
# names -- the original call incorrectly included the label "class" as a 7th name.
dot_data = tree.export_graphviz(d_tree, out_file=None, feature_names=feature_names)
graph = graphviz.Source(dot_data)
graph.render("car_dataset")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define the endpoint.
Step2: Prepare model
Step3: Configure docker credentials
Step4: Create a config-map in the namespace you're using with the docker config
Step5: Update the DOCKER_REGISTRY and build the training image using Kaniko
Step6: Create a Object Storage Bucket
Step8: Distributed training
Step9: Create the training job
Step10: Get TF Job logs
Step14: Deploy Tensorboard
Step18: Get Tensorboard URL
Step22: Deploy the mnist UI
|
<ASSISTANT_TASK:>
Python Code:
# IBM Cloud Object Storage (COS) service credentials.
# All values here are redacted placeholders -- fill in from your own COS
# service-credential JSON before running.
cos_credentials = {
  "apikey": "-------",
  "cos_hmac_keys": {
    # S3-compatible HMAC key pair used by the fairing/minio clients below.
    "access_key_id": "------",
    "secret_access_key": "------"
  },
  "endpoints": "https://cos-service.bluemix.net/endpoints",
  "iam_apikey_description": "------",
  "iam_apikey_name": "------",
  "iam_role_crn": "------",
  "iam_serviceid_crn": "------",
  "resource_instance_id": "-------"
}
# Regional COS endpoint (host only, and with the https:// scheme prepended).
service_endpoint = 's3.us.cloud-object-storage.appdomain.cloud'
service_endpoint_with_https="https://" + service_endpoint
import logging
import os
import uuid
from importlib import reload
import notebook_setup
reload(notebook_setup)
# Project helper: installs/refreshes notebook dependencies and auth.
notebook_setup.notebook_setup(platform=None)
import k8s_util
# Force a reload of kubeflow; since kubeflow is a multi namespace module
# it looks like doing this in notebook_setup may not be sufficient
import kubeflow
reload(kubeflow)
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
from kubeflow.tfjob.api import tf_job_client as tf_job_client_module
from IPython.core.display import display, HTML
import yaml
import json
# Docker registry credentials in dockerconfig format; "auth" is the
# base64-encoded "user:password" placeholder -- replace with your own.
config={
    "auths": {
        "https://index.docker.io/v1/": {
            "auth": "xxxxxxxxxxxxxxx"
        }
    }
}
with open('config.json', 'w') as outfile:
    json.dump(config, outfile)
# The next two lines are IPython shell magics (notebook-only syntax):
# publish the docker config as a ConfigMap for Kaniko, then remove the
# local copy so the secret doesn't linger on disk.
# !kubectl delete configmap docker-config
!kubectl create configmap docker-config --from-file=config.json
!rm config.json
from kubernetes import client as k8s_client
from kubernetes.client import rest as k8s_rest
from kubeflow import fairing
from kubeflow.fairing import utils as fairing_utils
from kubeflow.fairing.builders import append
from kubeflow.fairing.deployers import job
from kubeflow.fairing.preprocessors import base as base_preprocessor
# Update the DOCKER_REGISTRY to your docker registry!!
DOCKER_REGISTRY = "dockerregistry"
namespace = fairing_utils.get_current_k8s_namespace()
# S3-style HMAC credentials extracted from the COS service credentials above.
cos_username = cos_credentials['cos_hmac_keys']['access_key_id']
cos_key = cos_credentials['cos_hmac_keys']['secret_access_key']
cos_region = "us-east-1"
logging.info(f"Running in namespace {namespace}")
logging.info(f"Using docker registry {DOCKER_REGISTRY}")
# TODO(https://github.com/kubeflow/fairing/issues/426): We should get rid of this once the default
# Kaniko image is updated to a newer image than 0.7.0.
from kubeflow.fairing import constants
constants.constants.KANIKO_IMAGE = "gcr.io/kaniko-project/executor:v0.14.0"
from kubeflow.fairing.builders import cluster
# output_map is a map of extra files to add to the notebook.
# It is a map from source location to the location inside the context.
output_map =  {
    "Dockerfile.model": "Dockerfile",
    "model.py": "model.py"
}
preprocessor = base_preprocessor.BasePreProcessor(
    command=["python"], # The base class will set this.
    input_files=[],
    path_prefix="/app", # irrelevant since we aren't preprocessing any files
    output_map=output_map)
preprocessor.preprocess()
# Use a Tensorflow image as the base image
# We use a custom Dockerfile
# Stage the Kaniko build context in COS (S3-compatible) via the minio client.
from kubeflow.fairing.cloud.k8s import MinioUploader
from kubeflow.fairing.builders.cluster.minio_context import MinioContextSource
minio_uploader = MinioUploader(endpoint_url=service_endpoint_with_https, minio_secret=cos_username, minio_secret_key=cos_key, region_name=cos_region)
minio_context_source = MinioContextSource(endpoint_url=service_endpoint_with_https, minio_secret=cos_username, minio_secret_key=cos_key, region_name=cos_region)
# TODO: Add IBM Container registry as part of the fairing SDK.
# Build the training image in-cluster with Kaniko and push it to DOCKER_REGISTRY.
cluster_builder = cluster.cluster.ClusterBuilder(registry=DOCKER_REGISTRY,
                                                 base_image="", # base_image is set in the Dockerfile
                                                 preprocessor=preprocessor,
                                                 image_name="mnist",
                                                 dockerfile_path="Dockerfile",
                                                 context_source=minio_context_source)
cluster_builder.build()
logging.info(f"Built image {cluster_builder.image_tag}")
# Create (idempotently) the COS bucket that holds checkpoints and the export.
mnist_bucket = f"{DOCKER_REGISTRY}-mnist"
minio_uploader.create_bucket(mnist_bucket)
logging.info(f"Bucket {mnist_bucket} created or already exists")
# Distributed training: launch a TFJob with 1 chief, 1 PS and worker replicas.
train_name = f"mnist-train-{uuid.uuid4().hex[:4]}"
num_ps = 1
num_workers = 2
# Checkpoints and the exported SavedModel both live in the COS bucket.
model_dir = f"s3://{mnist_bucket}/mnist"
export_path = f"s3://{mnist_bucket}/mnist/export"
train_steps = 200
batch_size = 100
learning_rate = .01
image = cluster_builder.image_tag
# NOTE(review): the f-string triple-quote delimiters and the YAML indentation of
# this manifest were stripped during extraction; both are restored here.
# NOTE(review): Worker replicas is hard-coded to 1, leaving num_workers unused --
# confirm intent.
train_spec = f"""apiVersion: kubeflow.org/v1
kind: TFJob
metadata:
  name: {train_name}
spec:
  tfReplicaSpecs:
    Ps:
      replicas: {num_ps}
      template:
        metadata:
          annotations:
            sidecar.istio.io/inject: "false"
        spec:
          serviceAccount: default-editor
          containers:
          - name: tensorflow
            command:
            - python
            - /opt/model.py
            - --tf-model-dir={model_dir}
            - --tf-export-dir={export_path}
            - --tf-train-steps={train_steps}
            - --tf-batch-size={batch_size}
            - --tf-learning-rate={learning_rate}
            env:
            - name: S3_ENDPOINT
              value: {service_endpoint}
            - name: AWS_REGION
              value: {cos_region}
            - name: BUCKET_NAME
              value: {mnist_bucket}
            - name: S3_USE_HTTPS
              value: "1"
            - name: S3_VERIFY_SSL
              value: "1"
            - name: AWS_ACCESS_KEY_ID
              value: {cos_username}
            - name: AWS_SECRET_ACCESS_KEY
              value: {cos_key}
            image: {image}
            workingDir: /opt
          restartPolicy: OnFailure
    Chief:
      replicas: 1
      template:
        metadata:
          annotations:
            sidecar.istio.io/inject: "false"
        spec:
          serviceAccount: default-editor
          containers:
          - name: tensorflow
            command:
            - python
            - /opt/model.py
            - --tf-model-dir={model_dir}
            - --tf-export-dir={export_path}
            - --tf-train-steps={train_steps}
            - --tf-batch-size={batch_size}
            - --tf-learning-rate={learning_rate}
            env:
            - name: S3_ENDPOINT
              value: {service_endpoint}
            - name: AWS_REGION
              value: {cos_region}
            - name: BUCKET_NAME
              value: {mnist_bucket}
            - name: S3_USE_HTTPS
              value: "1"
            - name: S3_VERIFY_SSL
              value: "1"
            - name: AWS_ACCESS_KEY_ID
              value: {cos_username}
            - name: AWS_SECRET_ACCESS_KEY
              value: {cos_key}
            image: {image}
            workingDir: /opt
          restartPolicy: OnFailure
    Worker:
      replicas: 1
      template:
        metadata:
          annotations:
            sidecar.istio.io/inject: "false"
        spec:
          serviceAccount: default-editor
          containers:
          - name: tensorflow
            command:
            - python
            - /opt/model.py
            - --tf-model-dir={model_dir}
            - --tf-export-dir={export_path}
            - --tf-train-steps={train_steps}
            - --tf-batch-size={batch_size}
            - --tf-learning-rate={learning_rate}
            env:
            - name: S3_ENDPOINT
              value: {service_endpoint}
            - name: AWS_REGION
              value: {cos_region}
            - name: BUCKET_NAME
              value: {mnist_bucket}
            - name: S3_USE_HTTPS
              value: "1"
            - name: S3_VERIFY_SSL
              value: "1"
            - name: AWS_ACCESS_KEY_ID
              value: {cos_username}
            - name: AWS_SECRET_ACCESS_KEY
              value: {cos_key}
            image: {image}
            workingDir: /opt
          restartPolicy: OnFailure
"""
# Submit the manifest to the TFJob operator in the current namespace.
tf_job_client = tf_job_client_module.TFJobClient()
tf_job_body = yaml.safe_load(train_spec)
tf_job = tf_job_client.create(tf_job_body, namespace=namespace)
logging.info(f"Created job {namespace}.{train_name}")
from kubeflow.tfjob import TFJobClient
tfjob_client = TFJobClient()
# Block until the TFJob reaches a terminal state, streaming status updates.
tfjob_client.wait_for_job(train_name, namespace=namespace, watch=True)
# Print the training job's logs.
tfjob_client.get_logs(train_name, namespace=namespace)
# Deploy Tensorboard pointed at the training checkpoints in COS.
# NOTE(review): the f-string triple-quote delimiters and YAML indentation of
# these manifests were stripped during extraction; both are restored here.
tb_name = "mnist-tensorboard"
tb_deploy = f"""apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: mnist-tensorboard
  name: {tb_name}
  namespace: {namespace}
spec:
  selector:
    matchLabels:
      app: mnist-tensorboard
  template:
    metadata:
      labels:
        app: mnist-tensorboard
        version: v1
    spec:
      serviceAccount: default-editor
      containers:
      - command:
        - /usr/local/bin/tensorboard
        - --logdir={model_dir}
        - --port=80
        image: tensorflow/tensorflow:1.15.2-py3
        env:
        - name: S3_ENDPOINT
          value: {service_endpoint}
        - name: AWS_REGION
          value: {cos_region}
        - name: BUCKET_NAME
          value: {mnist_bucket}
        - name: S3_USE_HTTPS
          value: "1"
        - name: S3_VERIFY_SSL
          value: "1"
        - name: AWS_ACCESS_KEY_ID
          value: {cos_username}
        - name: AWS_SECRET_ACCESS_KEY
          value: {cos_key}
        name: tensorboard
        ports:
        - containerPort: 80
"""
tb_service = f"""apiVersion: v1
kind: Service
metadata:
  labels:
    app: mnist-tensorboard
  name: {tb_name}
  namespace: {namespace}
spec:
  ports:
  - name: http-tb
    port: 80
    targetPort: 80
  selector:
    app: mnist-tensorboard
  type: ClusterIP
"""
# Expose Tensorboard through the Kubeflow Istio gateway under
# /mnist/<namespace>/tensorboard/.
tb_virtual_service = f"""apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: {tb_name}
  namespace: {namespace}
spec:
  gateways:
  - kubeflow/kubeflow-gateway
  hosts:
  - '*'
  http:
  - match:
    - uri:
        prefix: /mnist/{namespace}/tensorboard/
    rewrite:
      uri: /
    route:
    - destination:
        host: {tb_name}.{namespace}.svc.cluster.local
        port:
          number: 80
    timeout: 300s
"""
tb_specs = [tb_deploy, tb_service, tb_virtual_service]
k8s_util.apply_k8s_specs(tb_specs, k8s_util.K8S_CREATE_OR_REPLACE)
# Serve the exported SavedModel with TensorFlow Serving.
# NOTE(review): the f-string triple-quote delimiters and YAML indentation of
# these manifests were stripped during extraction; both are restored here.
deploy_name = "mnist-model"
model_base_path = export_path
# The web ui defaults to mnist-service so if you change it you will
# need to change it in the UI as well to send predictions to the model
model_service = "mnist-service"
deploy_spec = f"""apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: mnist
  name: {deploy_name}
  namespace: {namespace}
spec:
  selector:
    matchLabels:
      app: mnist-model
  template:
    metadata:
      # TODO(jlewi): Right now we disable the istio side car because otherwise ISTIO rbac will prevent the
      # UI from sending RPCs to the server. We should create an appropriate ISTIO rbac authorization
      # policy to allow traffic from the UI to the model servier.
      # https://istio.io/docs/concepts/security/#target-selectors
      annotations:
        sidecar.istio.io/inject: "false"
      labels:
        app: mnist-model
        version: v1
    spec:
      serviceAccount: default-editor
      containers:
      - args:
        - --port=9000
        - --rest_api_port=8500
        - --model_name=mnist
        - --model_base_path={model_base_path}
        command:
        - /usr/bin/tensorflow_model_server
        env:
        - name: modelBasePath
          value: {model_base_path}
        - name: S3_ENDPOINT
          value: {service_endpoint}
        - name: AWS_REGION
          value: {cos_region}
        - name: BUCKET_NAME
          value: {mnist_bucket}
        - name: S3_USE_HTTPS
          value: "1"
        - name: S3_VERIFY_SSL
          value: "1"
        - name: AWS_ACCESS_KEY_ID
          value: {cos_username}
        - name: AWS_SECRET_ACCESS_KEY
          value: {cos_key}
        image: tensorflow/serving:1.15.0
        imagePullPolicy: IfNotPresent
        livenessProbe:
          initialDelaySeconds: 30
          periodSeconds: 30
          tcpSocket:
            port: 9000
        name: mnist
        ports:
        - containerPort: 9000
        - containerPort: 8500
        resources:
          limits:
            cpu: "4"
            memory: 4Gi
          requests:
            cpu: "1"
            memory: 1Gi
        volumeMounts:
        - mountPath: /var/config/
          name: model-config
      volumes:
      - configMap:
          name: {deploy_name}
        name: model-config
"""
service_spec = f"""apiVersion: v1
kind: Service
metadata:
  annotations:
    prometheus.io/path: /monitoring/prometheus/metrics
    prometheus.io/port: "8500"
    prometheus.io/scrape: "true"
  labels:
    app: mnist-model
  name: {model_service}
  namespace: {namespace}
spec:
  ports:
  - name: grpc-tf-serving
    port: 9000
    targetPort: 9000
  - name: http-tf-serving
    port: 8500
    targetPort: 8500
  selector:
    app: mnist-model
  type: ClusterIP
"""
# ConfigMap mounted into the serving container to enable Prometheus metrics.
# (The {{ }} are escaped literal braces inside the f-string.)
monitoring_config = f"""kind: ConfigMap
apiVersion: v1
metadata:
  name: {deploy_name}
  namespace: {namespace}
data:
  monitoring_config.txt: |-
    prometheus_config: {{
      enable: true,
      path: "/monitoring/prometheus/metrics"
    }}
"""
model_specs = [deploy_spec, service_spec, monitoring_config]
k8s_util.apply_k8s_specs(model_specs, k8s_util.K8S_CREATE_OR_REPLACE)
# Deploy the mnist web UI and expose it through the Istio gateway.
# NOTE(review): the f-string triple-quote delimiters and YAML indentation of
# these manifests were stripped during extraction; both are restored here.
ui_name = "mnist-ui"
ui_deploy = f"""apiVersion: apps/v1
kind: Deployment
metadata:
  name: {ui_name}
  namespace: {namespace}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mnist-web-ui
  template:
    metadata:
      labels:
        app: mnist-web-ui
    spec:
      containers:
      - image: gcr.io/kubeflow-examples/mnist/web-ui:v20190112-v0.2-142-g3b38225
        name: web-ui
        ports:
        - containerPort: 5000
      serviceAccount: default-editor
"""
ui_service = f"""apiVersion: v1
kind: Service
metadata:
  annotations:
  name: {ui_name}
  namespace: {namespace}
spec:
  ports:
  - name: http-mnist-ui
    port: 80
    targetPort: 5000
  selector:
    app: mnist-web-ui
  type: ClusterIP
"""
ui_virtual_service = f"""apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: {ui_name}
  namespace: {namespace}
spec:
  gateways:
  - kubeflow/kubeflow-gateway
  hosts:
  - '*'
  http:
  - match:
    - uri:
        prefix: /mnist/{namespace}/ui/
    rewrite:
      uri: /
    route:
    - destination:
        host: {ui_name}.{namespace}.svc.cluster.local
        port:
          number: 80
    timeout: 300s
"""
ui_specs = [ui_deploy, ui_service, ui_virtual_service]
k8s_util.apply_k8s_specs(ui_specs, k8s_util.K8S_CREATE_OR_REPLACE)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: NOTE
Step2: As expected, our boundaries stayed the same but our probabilities are less spread out. Looking good!
Step3: Great! We've successfully decomposed the space around the Pentagon, so we can tell the automatic security bots where the suspect is without having to pull out a map of the Pentagon and show them directly where on the map our intruder may be. That is, we've replaced communication of specific coordinates with the communication of 'zones' formed by spatial relationships to landmarks.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
# IPython magic (notebook-only syntax) -- inline matplotlib figures.
%matplotlib inline
from cops_and_robots.robo_tools.fusion.softmax import SoftMax, make_regular_2D_poly
# Regular pentagon (rotated, radius 2) standing in for the Pentagon footprint.
poly = make_regular_2D_poly(5, max_r=2, theta=np.pi/3.1)
labels = ['Interior',
          'Mall Terrace Entrance',
          'Heliport Facade',
          'South Parking Entrance',
          'Concourse Entrance',
          'River Terrace Entrance',
         ]
# SoftMax decomposition of the plane into one class per pentagon face + interior.
sm = SoftMax(poly=poly, class_labels=labels, resolution=0.1)
sm.plot(plot_poly=True, plot_normals=False)
# Higher steepness sharpens the class boundaries (probabilities less spread out).
# NOTE(review): the literal 5 is passed below; this variable is unused -- confirm.
steepness = 5
sm = SoftMax(poly=poly, class_labels=labels, resolution=0.1, steepness=5)
sm.plot(plot_poly=True, plot_normals=False)
# Same decomposition after translating/rotating the polygon.
poly = make_regular_2D_poly(5, max_r=2, theta=-np.pi/4, origin=(-2,3))
sm = SoftMax(poly=poly, class_labels=labels, resolution=0.1, steepness=5)
sm.plot(plot_poly=True, plot_normals=False)
# Irregular polygon example.
# NOTE(review): Polygon is not imported in this cell -- presumably
# shapely.geometry.Polygon from an earlier cell; verify.
poly = Polygon([(-1.0, 0.0),
                (-1.0, 1.0),
                (-3.5, 3.3),
                (-3.0, -2.0),
                (-2.0, -2.0),
               ])
sm = SoftMax(poly=poly, steepness=6)
sm.plot(plot_poly=True)
from IPython.core.display import HTML
# Borrowed style from Probabilistic Programming and Bayesian Methods for Hackers
def css_styling():
    # Load the notebook's custom CSS and return it as renderable HTML.
    styles = open("../styles/custom.css", "r").read()
    return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Class collaboration
Step7: 2. Extend our music-maker
Step8: 3. Initializing commands
Step9: 4. Making the score
|
<ASSISTANT_TASK:>
Python Code:
class IndicatorCommand:
    """Indicator command.

    Attaches a copy of ``indicator`` to each selection produced by
    ``selector`` when the command is called on music.
    """
    # NOTE(review): the original docstring had lost its triple-quote
    # delimiters during extraction (a SyntaxError); restored here.

    def __init__(self, indicator, selector):
        self.indicator = indicator
        self.selector = selector

    def __call__(self, music):
        # Attach a fresh copy per selection so attachments stay independent.
        for selection in self.selector(music):
            indicator = copy.copy(self.indicator)
            abjad.attach(indicator, selection)
class MusicMaker:
    """Make music from a talea: rhythms, time signatures, pitches, commands.

    Args:
        counts: talea durations; negative values become rests.
        denominator: talea denominator.
        pitches: cycle of pitches applied to pitched logical ties.
        commands: optional sequence of callables applied to the finished staff.
    """
    # NOTE(review): the five method docstrings below had lost their
    # triple-quote delimiters during extraction (SyntaxErrors); restored.

    def __init__(
        self,
        counts,
        denominator,
        pitches,
        commands=None,
    ):
        self.counts = counts
        self.denominator = denominator
        self.pitches = pitches
        self.commands = commands or ()

    def __call__(self, time_signatures):
        """Calls music-maker on time signatures."""
        time_signatures = [abjad.TimeSignature(_) for _ in time_signatures]
        staff = self._make_notes_and_rests(
            self.counts,
            self.denominator,
            time_signatures,
        )
        self._impose_time_signatures(staff, time_signatures)
        self._pitch_notes(staff, self.pitches)
        self._call_commands(staff)
        return staff

    def _make_notes_and_rests(self, counts, denominator, time_signatures):
        """Makes notes and rests."""
        durations = [_.duration for _ in time_signatures]
        total_duration = sum(durations)
        talea = rmakers.Talea(counts, denominator)
        talea_index = 0
        leaves = []
        current_duration = abjad.Duration(0)
        while current_duration < total_duration:
            leaf_duration = talea[talea_index]
            if 0 < leaf_duration:
                pitch = abjad.NamedPitch("c'")
            else:
                pitch = None
            leaf_duration = abs(leaf_duration)
            # Truncate the final leaf so the staff never overshoots the total.
            if total_duration < (leaf_duration + current_duration):
                leaf_duration = total_duration - current_duration
            leaves_ = abjad.LeafMaker()([pitch], [leaf_duration])
            leaves.extend(leaves_)
            current_duration += leaf_duration
            talea_index += 1
        staff = abjad.Staff(leaves)
        return staff

    def _impose_time_signatures(self, staff, time_signatures):
        """Imposes time signatures."""
        selections = abjad.mutate.split(staff[:], time_signatures, cyclic=True)
        for time_signature, selection in zip(time_signatures, selections):
            abjad.attach(time_signature, selection[0])
        measure_selections = abjad.select(staff).leaves().group_by_measure()
        for time_signature, measure_selection in zip(time_signatures, measure_selections):
            abjad.Meter.rewrite_meter(measure_selection, time_signature)

    def _pitch_notes(self, staff, pitches):
        """Pitches notes."""
        pitches = abjad.CyclicTuple(pitches)
        plts = abjad.select(staff).logical_ties(pitched=True)
        for i, plt in enumerate(plts):
            pitch = pitches[i]
            for note in plt:
                note.written_pitch = pitch

    def _call_commands(self, staff):
        """Calls commands."""
        for command in self.commands:
            command(staff)
# Beam each run of pitched leaves: start on the first leaf, stop on the last.
start_beam_command = IndicatorCommand(
    indicator=abjad.StartBeam(),
    selector=abjad.select().runs().map(abjad.select().leaf(0)),
)
stop_beam_command = IndicatorCommand(
    indicator=abjad.StopBeam(),
    selector=abjad.select().runs().map(abjad.select().leaf(-1)),
)
# Slur every other run (get([0], 2) keeps runs 0, 2, 4, ...).
start_slur_command = IndicatorCommand(
    indicator=abjad.StartSlur(),
    selector=abjad.select().runs().get([0], 2).map(abjad.select().leaf(0)),
)
stop_slur_command = IndicatorCommand(
    indicator=abjad.StopSlur(),
    selector=abjad.select().runs().get([0], 2).map(abjad.select().leaf(-1)),
)
# Accent the first leaf of each run; staccato on all remaining leaves.
accent_command = IndicatorCommand(
    indicator=abjad.Articulation("accent"),
    selector=abjad.select().runs().map(abjad.select().leaf(0)),
)
staccato_command = IndicatorCommand(
    indicator=abjad.Articulation("staccato"),
    selector=abjad.select().runs().map(abjad.select().leaves()[1:]).flatten(),
)
# Sixteenth-note talea (five notes then a rest) over a six-pitch cycle.
fast_music_maker = MusicMaker(
    counts=[1, 1, 1, 1, 1, -1],
    denominator=16,
    pitches="d' fs' a' d'' g' ef'".split(),
    commands=[
        start_beam_command,
        stop_beam_command,
        start_slur_command,
        stop_slur_command,
        accent_command,
        staccato_command,
    ],
)
# Render 18 measures (six repetitions of the 3/4, 5/8, 4/4 pattern).
staff = fast_music_maker(6 * [(3, 4), (5, 8), (4, 4)])
score = abjad.Score([staff])
lilypond_file = abjad.LilyPondFile.new(
    music=score,
    includes=["stylesheet.ily"],
)
abjad.show(lilypond_file)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Acquire Dee Dataset from Methods Matter
Step2: Summary Statistics
Step3: Cross-Tabulation
Step4: Correlation Matrix
Step5: Linear Regression of REGISTER on COLLEGE
Step6: Two-Stage Least Squares Regression of REGISTER ~ COLLEGE where IV=DISTANCE
Step7: ^^^^^^ Not sure what's going on with the R2 statistic here (it's 0.001 here, versus 0.022 in the example), although everything else matches what we see from the Stata output in the published example
Step8: Two-Stage Least Squares Regression of REGISTER ~ COLLEGE + BLACK + HISPANIC + OTHERRACE where IV=DISTANCE
Step9: Interactions Between the Endogenous Question Predictor and Exogenous Covariates in the Second Stage Model
Step10: ^^^ in this particular case, we find no significant interactions and fall back on our previous model, which simply included race/ethnicity as a covariate
Step11: Binomial Regression
|
<ASSISTANT_TASK:>
Python Code:
# Instrumental-variables (2SLS) replication of the Dee college/registration
# example from "Methods Matter", ch. 10.
# NOTE(review): this is Python 2 code (print statements, urllib2); running it
# under Python 3 requires print()/urllib.request changes.
# THINGS TO IMPORT
# This is a baseline set of libraries I import by default if I'm rushed for time.
import codecs # load UTF-8 Content
import json # load JSON files
import pandas as pd # Pandas handles dataframes
import numpy as np # Numpy handles lots of basic maths operations
import matplotlib.pyplot as plt # Matplotlib for plotting
import seaborn as sns # Seaborn for beautiful plots
from dateutil import * # I prefer dateutil for parsing dates
import math # transformations
import statsmodels.formula.api as smf # for doing statistical regression
import statsmodels.api as sm # access to the wider statsmodels library, including R datasets
from collections import Counter # Counter is useful for grouping and counting
import scipy
import urllib2
import os.path
# Download the Stata dataset once; reuse the local copy on later runs.
if(os.path.isfile("dee.dta")!=True):
    response = urllib2.urlopen("http://www.ats.ucla.edu/stat/stata/examples/methods_matter/chapter10/dee.dta")
    if(response.getcode()==200):
        f = open("dee.dta","w")
        f.write(response.read())
        f.close()
dee_df = pd.read_stata("dee.dta")
# Summary statistics for the outcome, treatment, and instrument.
dee_df[['register','college', 'distance']].describe()
# Cross-tabulation and chi-square test of register vs. college.
print pd.crosstab(dee_df.register, dee_df.college)
chi2 = scipy.stats.chi2_contingency(pd.crosstab(dee_df.register, dee_df.college))
print "chi2: %(c)d" % {"c":chi2[0]}
print "p: %(p)0.03f" % {"p":chi2[1]}
print "df: %(df)0.03f" % {"df":chi2[2]}
print "expected:"
print chi2[3]
# NOTE(review): sns.corrplot was removed in later seaborn releases --
# confirm the installed seaborn version.
sns.corrplot(dee_df[['register','college','distance']])
# Naive OLS of the outcome on the (endogenous) treatment, for comparison.
result = smf.ols(formula = "register ~ college", data = dee_df).fit()
print result.summary()
# Two-stage least squares by hand: stage 1 predicts the endogenous regressor
# (college) from the instrument (distance); stage 2 regresses the outcome on
# the fitted values.
print "=============================================================================="
print "                             FIRST STAGE"
print "=============================================================================="
result = smf.ols(formula = "college ~ distance", data = dee_df).fit()
print result.summary()
dee_df['college_fitted'] = result.predict()
print
print
print "=============================================================================="
print "                             SECOND STAGE"
print "=============================================================================="
result = smf.ols(formula = "register ~ college_fitted", data=dee_df).fit()
print result.summary()
sns.corrplot(dee_df[['register','college','distance', 'black','hispanic','otherrace']])
# 2SLS again, now with race/ethnicity as exogenous covariates in both stages.
print "=============================================================================="
print "                             FIRST STAGE"
print "=============================================================================="
result = smf.ols(formula = "college ~ distance + black + hispanic + otherrace", data = dee_df).fit()
print result.summary()
dee_df['college_fitted'] = result.predict()
print
print
print "=============================================================================="
print "                             SECOND STAGE"
print "=============================================================================="
result = smf.ols(formula = "register ~ college_fitted + black + hispanic + otherrace", data=dee_df).fit()
print result.summary()
# Interactions between the endogenous predictor and the exogenous covariates:
# each interaction gets its own first-stage instrument regression.
print "=============================================================================="
print "                             FIRST STAGE"
print "=============================================================================="
# generate the stage one main effect instrument
result = smf.ols(formula = "college ~ distance + black + hispanic + otherrace +" +
                 "distance:black + distance:hispanic + distance:otherrace", data = dee_df).fit()
dee_df['college_fitted'] = result.predict()
print result.summary()
# generate the stage one interaction instrument for distance:black
# note that we have DROPPED the irrelevant terms.
# The full form for each interaction, which gives the exact same result, is:
# result = smf.ols(formula = "college:black ~ distance + black + hispanic + otherrace +" +
#                  "distance:black + distance:hispanic + distance:otherrace", data = dee_df).fit()
result = smf.ols(formula = "college:black ~ distance + black + distance:black", data = dee_df).fit()
dee_df['collegeXblack'] = result.predict()
# generate the stage one interaction instrument for distance:hispanic
result = smf.ols(formula = "college:hispanic ~ distance + hispanic + distance:hispanic", data = dee_df).fit()
dee_df['collegeXhispanic'] = result.predict()
# generate the stage one interaction instrument for distance:hispanic
result = smf.ols(formula = "college:otherrace ~ distance + otherrace + distance:otherrace", data = dee_df).fit()
dee_df['collegeXotherrace'] = result.predict()
# generate the final model, that includes these interactions as predictors
result = smf.ols(formula = "register ~ college_fitted + black + hispanic + otherrace +" +
                 "collegeXblack + collegeXhispanic + collegeXotherrace", data = dee_df).fit()
print result.summary()
# Binomial (logit) variant of the two stages.
print "=============================================================================="
print "                             FIRST STAGE"
print "=============================================================================="
result = smf.glm(formula = "college ~ distance + black + hispanic + otherrace",
                 data=dee_df,
                 family=sm.families.Binomial()).fit()
print result.summary()
dee_df['college_fitted'] = result.predict()
print
print
print "=============================================================================="
print "                             SECOND STAGE"
print "=============================================================================="#
result = smf.glm(formula = "register ~ college_fitted + black + hispanic + otherrace",
                 data=dee_df,
                 family=sm.families.Binomial()).fit()
print result.summary()
# Probit variant, using patsy design matrices with sm.Probit.
import patsy
print "=============================================================================="
print "                             FIRST STAGE"
print "=============================================================================="
a,b = patsy.dmatrices("college ~ distance + black + hispanic + otherrace",
                      dee_df,return_type="dataframe")
result = sm.Probit(a,b).fit()
print result.summary()
dee_df['college_fitted'] = result.predict()
print
print
print "=============================================================================="
print "                             SECOND STAGE"
print "=============================================================================="#
a,b = patsy.dmatrices("register ~ college_fitted + black + hispanic + otherrace",
                      dee_df,return_type="dataframe")
result = sm.Probit(a,b).fit()
print result.summary()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Advanced
Step3: Let's also combine our plotting code into a cohesive function
Step4: Now we can tie our plot function, plot_planck, to the interact function from ipywidgets
|
<ASSISTANT_TASK:>
Python Code:
# Import numpy and alias to "np"
import numpy as np
# Import and alias to "plt"
import matplotlib.pyplot as plt
def planck(wavelength, temp):
    """Return the spectral radiance of a blackbody (Planck's law).

    B(lambda, T) = 2 h c^2 / lambda^5 * 1 / (exp(h c / (lambda k_B T)) - 1)

    Args:
        wavelength (float or np.ndarray): wavelength (m)
        temp (float): blackbody temperature (Kelvin)

    Returns:
        float or np.ndarray: spectral radiance (W / (sr m^3))
    """
    k_b = 1.3806488e-23    # J/K - Boltzmann constant
    h = 6.626070040e-34    # J s - Planck's constant
    c = 3e8                # m/s - speed of light
    return ((2 * h * c ** 2) / wavelength ** 5 *
            1 / (np.exp(h * c / (wavelength * k_b * temp)) - 1))
def plot_planck(temp):
    """Plot blackbody spectral radiance vs. wavelength for a given temperature.

    Wavelengths span 10 nm - 10 um; axes are in microns and
    kW sr^-1 m^-2 nm^-1.  The curve is annotated with the temperature at
    its peak.

    Args:
        temp (float): blackbody temperature (Kelvin)
    """
    wavelength = np.linspace(1e-8, 10e-6, 1000)
    rad = planck(wavelength, temp)
    # Annotation position: peak wavelength (um) and peak radiance (converted units).
    text_x = wavelength[rad.argmax()] * 1e6
    text_y = rad.max() / 1e3 / 1e9
    temp_str = '%.2f K' % temp
    fig, ax = plt.subplots()
    ax.plot(wavelength * 1e6, rad / 1e3 / 1e9)
    ax.text(text_x, text_y, temp_str, ha='center')
    ax.set_xlabel(r'Wavelength ($\mu m$)')
    ax.set_ylabel(r'Spectral radiance ($kW \cdot sr^{-1} \cdot m^{-2} \cdot nm^{-1}$)')
    ax.set_xlim([1e-8 * 1e6, 10e-6 * 1e6])
# Interactive slider over temperature (250 K .. 10,000 K in 100 K steps)
# driving plot_planck.  Requires a Jupyter notebook kernel (IPython magic).
%matplotlib nbagg
from ipywidgets import interactive
from IPython.core.display import display
vis = interactive(plot_planck, temp=(250, 10e3, 100))
display(vis)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Downloading the dataset
Step2: Preparing the dataset
Step3: Preparing hyperparameters
Step7: Building a data pipeline
Step8: Visualizing samples
Step9: 3D point cloud visualization
Step10: Building the model
Step11: Defining the loss
Step12: Model training
Step13: Visualizing model output
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
import tensorflow as tf
from tensorflow.keras import layers
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
tf.random.set_seed(123)
# Download the DIODE validation split (if not already present) and build a
# dataframe of matching (image, depth, mask) file paths for the indoor scenes.
annotation_folder = "/dataset/"
if not os.path.exists(os.path.abspath(".") + annotation_folder):
    annotation_zip = tf.keras.utils.get_file(
        "val.tar.gz",
        cache_subdir=os.path.abspath("."),
        origin="http://diode-dataset.s3.amazonaws.com/val.tar.gz",
        extract=True,
    )
path = "val/indoors"
filelist = []
for root, dirs, files in os.walk(path):
    for file in files:
        filelist.append(os.path.join(root, file))
filelist.sort()
# Sorted traversal keeps image/depth/mask lists aligned row-by-row.
data = {
    "image": [x for x in filelist if x.endswith(".png")],
    "depth": [x for x in filelist if x.endswith("_depth.npy")],
    "mask": [x for x in filelist if x.endswith("_depth_mask.npy")],
}
df = pd.DataFrame(data)
df = df.sample(frac=1, random_state=42)  # deterministic shuffle of all rows
# Hyperparameters.
HEIGHT = 256
WIDTH = 256
LR = 0.0002
EPOCHS = 30
BATCH_SIZE = 32
class DataGenerator(tf.keras.utils.Sequence):
    """Keras Sequence yielding (RGB image, log-depth map) batches from the
    DIODE dataframe (columns 'image', 'depth', 'mask' holding file paths).

    The original notebook extraction stripped the triple quotes around the
    method docstrings (a syntax error); they are restored here.  Executable
    statements are unchanged.
    """

    def __init__(self, data, batch_size=6, dim=(768, 1024), n_channels=3, shuffle=True):
        """Initialization."""
        self.data = data
        self.indices = self.data.index.tolist()
        self.dim = dim
        self.n_channels = n_channels
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.min_depth = 0.1  # lower clip for depth values before taking log
        self.on_epoch_end()

    def __len__(self):
        # Number of batches per epoch (last one may be partial).
        return int(np.ceil(len(self.data) / self.batch_size))

    def __getitem__(self, index):
        if (index + 1) * self.batch_size > len(self.indices):
            # NOTE(review): this permanently shrinks self.batch_size for the
            # final partial batch, which also affects later epochs -- confirm
            # whether that is intended.
            self.batch_size = len(self.indices) - index * self.batch_size
        # Generate one batch of data
        # Generate indices of the batch
        index = self.indices[index * self.batch_size : (index + 1) * self.batch_size]
        # Find list of IDs
        # NOTE(review): `batch` re-indexes self.indices with the values sliced
        # above; this double indirection is only an identity when the dataframe
        # index is 0..N-1 (hence the reset_index() at the call sites).
        batch = [self.indices[k] for k in index]
        x, y = self.data_generation(batch)
        return x, y

    def on_epoch_end(self):
        """Updates indexes after each epoch."""
        # NOTE(review): self.index is shuffled but batching reads self.indices,
        # so this shuffle has no effect on the emitted batches -- verify intent.
        self.index = np.arange(len(self.indices))
        if self.shuffle == True:
            np.random.shuffle(self.index)

    def load(self, image_path, depth_map, mask):
        """Load input and target image."""
        image_ = cv2.imread(image_path)
        image_ = cv2.cvtColor(image_, cv2.COLOR_BGR2RGB)
        image_ = cv2.resize(image_, self.dim)
        image_ = tf.image.convert_image_dtype(image_, tf.float32)
        depth_map = np.load(depth_map).squeeze()
        mask = np.load(mask)
        mask = mask > 0
        # Clip depth to [min_depth, 99th percentile (capped at 300)], take the
        # log, and mask out invalid pixels.
        max_depth = min(300, np.percentile(depth_map, 99))
        depth_map = np.clip(depth_map, self.min_depth, max_depth)
        depth_map = np.log(depth_map, where=mask)
        depth_map = np.ma.masked_where(~mask, depth_map)
        depth_map = np.clip(depth_map, 0.1, np.log(max_depth))
        depth_map = cv2.resize(depth_map, self.dim)
        depth_map = np.expand_dims(depth_map, axis=2)
        depth_map = tf.image.convert_image_dtype(depth_map, tf.float32)
        return image_, depth_map

    def data_generation(self, batch):
        """Assemble one batch of (image, depth) arrays from dataframe row ids."""
        x = np.empty((self.batch_size, *self.dim, self.n_channels))
        y = np.empty((self.batch_size, *self.dim, 1))
        for i, batch_id in enumerate(batch):
            x[i,], y[i,] = self.load(
                self.data["image"][batch_id],
                self.data["depth"][batch_id],
                self.data["mask"][batch_id],
            )
        return x, y
def visualize_depth_map(samples, test=False, model=None):
    """Plot 6 rows of (input image, ground-truth depth[, model prediction]).

    Args:
        samples: one (input_batch, target_batch) pair, e.g. from DataGenerator.
        test: when True, also run `model` on the inputs and show predictions.
        model: model with a .predict() method; required when test=True.
    """
    # Renamed from `input` to avoid shadowing the builtin.
    inputs, target = samples
    cmap = plt.cm.jet
    cmap.set_bad(color="black")  # masked (invalid) depth pixels render black
    if test:
        pred = model.predict(inputs)
        fig, ax = plt.subplots(6, 3, figsize=(50, 50))
        for i in range(6):
            ax[i, 0].imshow((inputs[i].squeeze()))
            ax[i, 1].imshow((target[i].squeeze()), cmap=cmap)
            ax[i, 2].imshow((pred[i].squeeze()), cmap=cmap)
    else:
        fig, ax = plt.subplots(6, 2, figsize=(50, 50))
        for i in range(6):
            ax[i, 0].imshow((inputs[i].squeeze()))
            ax[i, 1].imshow((target[i].squeeze()), cmap=cmap)
# Grab one sample batch and show image/depth pairs.
visualize_samples = next(
    iter(DataGenerator(data=df, batch_size=6, dim=(HEIGHT, WIDTH)))
)
visualize_depth_map(visualize_samples)
# 3D point-cloud view of sample index 1: depth on the first axis, pixel
# coordinates on the other two, colored by the RGB image.
depth_vis = np.flipud(visualize_samples[1][1].squeeze())  # target
img_vis = np.flipud(visualize_samples[0][1].squeeze())  # input
fig = plt.figure(figsize=(15, 10))
ax = plt.axes(projection="3d")
STEP = 3  # subsample every 3rd pixel to keep the scatter manageable
for x in range(0, img_vis.shape[0], STEP):
    for y in range(0, img_vis.shape[1], STEP):
        ax.scatter(
            [depth_vis[x, y]] * 3,
            [y] * 3,
            [x] * 3,
            c=tuple(img_vis[x, y, :3] / 255),
            s=3,
        )
ax.view_init(45, 135)
class DownscaleBlock(layers.Layer):
    """Encoder block: two conv + BN + LeakyReLU stages with a residual add,
    returning both the pre-pool features (skip connection) and the 2x2
    max-pooled output."""
    def __init__(
        self, filters, kernel_size=(3, 3), padding="same", strides=1, **kwargs
    ):
        super().__init__(**kwargs)
        self.convA = layers.Conv2D(filters, kernel_size, strides, padding)
        self.convB = layers.Conv2D(filters, kernel_size, strides, padding)
        self.reluA = layers.LeakyReLU(alpha=0.2)
        self.reluB = layers.LeakyReLU(alpha=0.2)
        self.bn2a = tf.keras.layers.BatchNormalization()
        self.bn2b = tf.keras.layers.BatchNormalization()
        self.pool = layers.MaxPool2D((2, 2), (2, 2))
    def call(self, input_tensor):
        d = self.convA(input_tensor)
        x = self.bn2a(d)
        x = self.reluA(x)
        x = self.convB(x)
        x = self.bn2b(x)
        x = self.reluB(x)
        x += d  # residual connection from the first conv's output
        p = self.pool(x)
        return x, p  # (skip features, downsampled features)
class UpscaleBlock(layers.Layer):
    """Decoder block: 2x upsample, concatenate the encoder skip tensor,
    then two conv + BN + LeakyReLU stages."""
    def __init__(
        self, filters, kernel_size=(3, 3), padding="same", strides=1, **kwargs
    ):
        super().__init__(**kwargs)
        self.us = layers.UpSampling2D((2, 2))
        self.convA = layers.Conv2D(filters, kernel_size, strides, padding)
        self.convB = layers.Conv2D(filters, kernel_size, strides, padding)
        self.reluA = layers.LeakyReLU(alpha=0.2)
        self.reluB = layers.LeakyReLU(alpha=0.2)
        self.bn2a = tf.keras.layers.BatchNormalization()
        self.bn2b = tf.keras.layers.BatchNormalization()
        self.conc = layers.Concatenate()
    def call(self, x, skip):
        x = self.us(x)
        concat = self.conc([x, skip])  # fuse upsampled features with skip
        x = self.convA(concat)
        x = self.bn2a(x)
        x = self.reluA(x)
        x = self.convB(x)
        x = self.bn2b(x)
        x = self.reluB(x)
        return x
class BottleNeckBlock(layers.Layer):
    """Bottleneck between encoder and decoder: two conv + LeakyReLU stages
    (no pooling, no batch norm)."""
    def __init__(
        self, filters, kernel_size=(3, 3), padding="same", strides=1, **kwargs
    ):
        super().__init__(**kwargs)
        self.convA = layers.Conv2D(filters, kernel_size, strides, padding)
        self.convB = layers.Conv2D(filters, kernel_size, strides, padding)
        self.reluA = layers.LeakyReLU(alpha=0.2)
        self.reluB = layers.LeakyReLU(alpha=0.2)
    def call(self, x):
        x = self.convA(x)
        x = self.reluA(x)
        x = self.convB(x)
        x = self.reluB(x)
        return x
class DepthEstimationModel(tf.keras.Model):
    """U-Net-style depth estimation model with a custom training loop.

    The loss is a weighted sum of SSIM, L1 (point-wise depth) and an
    edge-aware depth-smoothness term; train_step/test_step override Keras'
    defaults, so any loss passed to compile() is ignored.
    """
    def __init__(self):
        super().__init__()
        # Loss-term weights used by calculate_loss().
        self.ssim_loss_weight = 0.85
        self.l1_loss_weight = 0.1
        self.edge_loss_weight = 0.9
        self.loss_metric = tf.keras.metrics.Mean(name="loss")
        f = [16, 32, 64, 128, 256]  # filter counts per encoder/decoder level
        self.downscale_blocks = [
            DownscaleBlock(f[0]),
            DownscaleBlock(f[1]),
            DownscaleBlock(f[2]),
            DownscaleBlock(f[3]),
        ]
        self.bottle_neck_block = BottleNeckBlock(f[4])
        self.upscale_blocks = [
            UpscaleBlock(f[3]),
            UpscaleBlock(f[2]),
            UpscaleBlock(f[1]),
            UpscaleBlock(f[0]),
        ]
        # 1x1 conv head producing a single-channel depth map in (-1, 1).
        self.conv_layer = layers.Conv2D(1, (1, 1), padding="same", activation="tanh")
    def calculate_loss(self, target, pred):
        # Edges
        dy_true, dx_true = tf.image.image_gradients(target)
        dy_pred, dx_pred = tf.image.image_gradients(pred)
        # Weight predicted gradients by the magnitude of true gradients.
        weights_x = tf.exp(tf.reduce_mean(tf.abs(dx_true)))
        weights_y = tf.exp(tf.reduce_mean(tf.abs(dy_true)))
        # Depth smoothness
        smoothness_x = dx_pred * weights_x
        smoothness_y = dy_pred * weights_y
        depth_smoothness_loss = tf.reduce_mean(abs(smoothness_x)) + tf.reduce_mean(
            abs(smoothness_y)
        )
        # Structural similarity (SSIM) index
        # NOTE(review): max_val=WIDTH sets SSIM's dynamic range to the image
        # width rather than the depth value range -- confirm this is intended.
        ssim_loss = tf.reduce_mean(
            1
            - tf.image.ssim(
                target, pred, max_val=WIDTH, filter_size=7, k1=0.01 ** 2, k2=0.03 ** 2
            )
        )
        # Point-wise depth
        l1_loss = tf.reduce_mean(tf.abs(target - pred))
        loss = (
            (self.ssim_loss_weight * ssim_loss)
            + (self.l1_loss_weight * l1_loss)
            + (self.edge_loss_weight * depth_smoothness_loss)
        )
        return loss
    @property
    def metrics(self):
        # Exposing the metric here lets Keras reset it between epochs.
        return [self.loss_metric]
    def train_step(self, batch_data):
        # NOTE: local name `input` shadows the builtin (kept as-is here).
        input, target = batch_data
        with tf.GradientTape() as tape:
            pred = self(input, training=True)
            loss = self.calculate_loss(target, pred)
        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        self.loss_metric.update_state(loss)
        return {
            "loss": self.loss_metric.result(),
        }
    def test_step(self, batch_data):
        input, target = batch_data
        pred = self(input, training=False)
        loss = self.calculate_loss(target, pred)
        self.loss_metric.update_state(loss)
        return {
            "loss": self.loss_metric.result(),
        }
    def call(self, x):
        # Encoder: collect skip features c1..c4 and pooled outputs p1..p4.
        c1, p1 = self.downscale_blocks[0](x)
        c2, p2 = self.downscale_blocks[1](p1)
        c3, p3 = self.downscale_blocks[2](p2)
        c4, p4 = self.downscale_blocks[3](p3)
        bn = self.bottle_neck_block(p4)
        # Decoder: upsample and fuse with the matching skip tensor.
        u1 = self.upscale_blocks[0](bn, c4)
        u2 = self.upscale_blocks[1](u1, c3)
        u3 = self.upscale_blocks[2](u2, c2)
        u4 = self.upscale_blocks[3](u3, c1)
        return self.conv_layer(u4)
optimizer = tf.keras.optimizers.Adam(
    learning_rate=LR,
    amsgrad=False,
)
model = DepthEstimationModel()
# Define the loss function
# NOTE(review): this compiled loss is never used -- DepthEstimationModel
# overrides train_step/test_step and computes calculate_loss() instead.
# Cross-entropy would also be unusual for depth regression; confirm.
cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction="none"
)
# Compile the model
model.compile(optimizer, loss=cross_entropy)
# NOTE(review): drop="true" passes a (truthy) string where a bool is expected;
# it behaves like drop=True but should probably be the literal True.
train_loader = DataGenerator(
    data=df[:260].reset_index(drop="true"), batch_size=BATCH_SIZE, dim=(HEIGHT, WIDTH)
)
validation_loader = DataGenerator(
    data=df[260:].reset_index(drop="true"), batch_size=BATCH_SIZE, dim=(HEIGHT, WIDTH)
)
model.fit(
    train_loader,
    epochs=EPOCHS,
    validation_data=validation_loader,
)
# Visualize predictions on two held-out batches.
test_loader = next(
    iter(
        DataGenerator(
            data=df[265:].reset_index(drop="true"), batch_size=6, dim=(HEIGHT, WIDTH)
        )
    )
)
visualize_depth_map(test_loader, test=True, model=model)
test_loader = next(
    iter(
        DataGenerator(
            data=df[300:].reset_index(drop="true"), batch_size=6, dim=(HEIGHT, WIDTH)
        )
    )
)
visualize_depth_map(test_loader, test=True, model=model)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
d = ({'Date': ['1/1/18','1/1/18','2/1/18','3/1/18','1/2/18','1/3/18','2/1/19','3/1/19'],
'Val': ['A','B','C','D','A','B','C','D']})
df = pd.DataFrame(data=d)
def g(df):
    """Annotate `df` with per-day, per-month and per-year counts of 'Date'.

    Parses 'Date' as day/month/two-digit-year, then adds Count_d, Count_m
    and Count_y columns where every row carries the size of its group.
    Returns the (mutated) dataframe.
    """
    df['Date'] = pd.to_datetime(df['Date'], format='%d/%m/%y')
    yr = df['Date'].dt.year
    mo = df['Date'].dt.month
    # transform('size') broadcasts each group's row count back onto every row.
    for col, keys in (('Count_d', 'Date'), ('Count_m', [yr, mo]), ('Count_y', yr)):
        df[col] = df.groupby(keys)['Date'].transform('size')
    return df
df = g(df.copy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The starting position of the rosen function is set to be
Step2: For paramz to understand your model there is three steps involved
Step3: The class created above only holds the information about the parameters, we still have to implement the objective function to optimize over. For now the class can be instantiated but is not functional yet.
Step4: Step Two
Step5: Step Three
Step6: Model Usage
Step7: This rosen model is a fully working parameterized model for gradient based optimization of the rosen function of scipy.
Step8: Or use the notebook representation
Step9: Note the model just printing the shape (in the value column) of the parameters, as parameters can be any sized arrays or matrices (with arbitrary numbers of dimensions).
Step10: Or by name
Step11: We can redefine the name freely, as long as it does not exist already
Step12: Now r.position will not be accessible anymore!
Step13: Setting Parameters and Automated Updates
Step14: Note that we never actually told the model to update. It listened to changes to any of its parameters and updated accordingly. This update chain is based on the hierarchy of the model structure. Specific values of parameters can be accessed through indexing, just like indexing numpy arrays. In fact Param is a derivative of ndarray and inherits all its traits. Thus, Param can be used in any calculation involved with numpy. Importantly, when using a Param parameter inside a computation, it will be returning a normal numpy array. This prevents unwanted side effects and pointer errors.
Step15: Optimization
Step16: To show the values of the positions itself, we directly print the Param object
Step17: We could also randomize the model by using the convenience function randomize(), on the part we want to randomize. It can be any part of the model, also the whole model can be randomized
Step18: Gradient Checking
Step19: Or on the whole model (verbose or not)
Step20: Or on individual parameters, note that numpy indexing is used
Step21: Constraining Parameter Spaces
Step22: The printing will contain the constraints, either directly on the object, or it lists the constraints contained within a parameter. If a parameter has multiple constraints spread across the Param object all constraints contained in the whole Param object are indicated with {<partial constraint>}
Step23: To show the individual constraints, we look at the Param object of interest directly
Step24: The constraints (and other indexed properties) are held by each parameter as a dictionary with the name. For example the constraints are held in a constraints dictionary, where the keys are the constraints, and the values are the indices this constraint refers to. You can either ask for the constraints of the whole model
Step25: Or the constraints of individual Parameterized objects
Step26: The constraints of subparts of the model are only views into the actual constaints held by the root of the model hierarchy.
Step27: The keen eyed will have noticed, that we did not set any gradients in the above definition. That is because the underlying rosen models handle their gradients directly!
Step28: All options listed above are availible for this model now. No additional steps need to be taken!
Step29: To show the different ways of how constraints are displayed, we constrain different parts of the model and fix parts of it too
Step30: First, we can see, that because two models with the same name were added to dr, the framework renamed the second model to have a unique name. This only happens when two childs of one parameter share the same name. If the two childs not under the same parameter share names, it is just fine, as you can see in the name of x in both models
Step31: Or print only one model
Step32: We can showcase that constraints are mapped to each parameter directly. We can either access the constraints of the whole model directly
Step33: Or for parameters directly
Step34: Note, that the constraints are remapped to directly index the parameters locally. This directly leeds up to the in memory handling of parameters. The root node of the hierarchy holds one parameter array param_array comprising all parameters. The same goes for the gradient gradient
Step35: Each child parameter (and subsequent parameters) have their own view into the memory of the root node
Step36: When changing the param_array of a parameter it directly edits the memory of the root node. This is a big part of the optimization of paramz, as getting and setting parameters works directly in memory and does not need any python routines (such as loops or traversal) functionality.
Step37: Note, that the optimizer array does only contain three values. This is because the first element of the the first rosen model is fixed and is not presented to the optimizer. The transformed gradients can be computed by the root node directly
|
<ASSISTANT_TASK:>
Python Code:
import paramz, numpy as np
from scipy.optimize import rosen_der, rosen
x = np.array([-1,1])
class Rosen(paramz.Model): # Inherit from paramz.Model to ensure all model functionality.
    """First draft: declares the parameter only -- no objective yet, so
    printing the model raises NotImplementedError (demonstrated below)."""
    def __init__(self, x, name='rosen'): # Initialize the Rosen model with a numpy array `x` and name `name`.
        super(Rosen, self).__init__(name=name) # Call to super to make sure the structure is set up.
        self.x = paramz.Param('position', x) # setup a Param object for the position parameter.
        self.link_parameter(self.x) # Tell the model that the parameter `x` exists.
r = Rosen(x)
try:
print(r)
except NotImplementedError as e:
print(e)
class Rosen(paramz.Model):
    """Second draft: adds the objective (scipy's rosen), but recomputes it on
    every call and provides no gradients yet."""
    def __init__(self, x, name='rosen'):
        super(Rosen, self).__init__(name=name)
        self.x = paramz.Param('position', x)
        self.link_parameter(self.x)
    def objective_function(self): # The function to overwrite for the framework to know about the objective to optimize
        return rosen(self.x) # Call the rosenbrock function of scipy as objective function.
class Rosen(paramz.Model):
    """Final version: lazily caches the objective and stores analytic
    gradients in parameters_changed(), which the framework triggers when a
    parameter is updated."""
    def __init__(self, x, name='rosen'):
        super(Rosen, self).__init__(name=name)
        self.x = paramz.Param('position', x)
        self.link_parameter(self.x)
    def objective_function(self):
        return self._obj  # cached by parameters_changed()
    def parameters_changed(self): # Overwrite the parameters_changed function for model updates
        self._obj = rosen(self.x) # Lazy evaluation of the rosen function only when there is an update
        self.x.gradient[:] = rosen_der(self.x) # Computation and storing of the gradients for the position parameter
r = Rosen(x)
print(r)
r
r.x
r.position
r.x.name = 'pos'
r
try:
r.position
except AttributeError as v:
print("Attribute Error: " + str(v))
print("Objective before change: {}".format(r._obj))
r.x[0] = 1
print("Objective after change: {}".format(r._obj))
2 * r.x
r.x[:] = [100,5] # Set to a difficult starting position to show the messages of the optimization.
r.optimize(messages=1) # Call the optimization and show the progress.
r.x
np.random.seed(100)
r.randomize()
r.x
r.x.randomize()
r.x
r.x.checkgrad(verbose=1)
r.checkgrad()
r.checkgrad(verbose=1)
r.x[[0]].checkgrad(verbose=1)
r.x[[0]].constrain_bounded(-10,-1)
r.x[[1]].constrain_positive()
r
r.x
list(r.constraints.items())
list(r.x.constraints.items())
class DoubleRosen(paramz.Model):
    """Hierarchical model combining two Rosen sub-models; the joint objective
    is simply the sum of the two sub-objectives."""
    def __init__(self, x1, x2, name='silly_double'):
        super(DoubleRosen, self).__init__(name=name) # Call super to initiate the structure of the model
        self.r1 = Rosen(x1) # Instantiate the underlying Rosen classes
        self.r2 = Rosen(x2)
        # Tell this model, which parameters it has. Models are just the same as parameters:
        self.link_parameters(self.r1, self.r2)
    def objective_function(self):
        return self._obj # Lazy evaluation of the objective
    def parameters_changed(self):
        self._obj = self.r1._obj + self.r2._obj # Just add both objectives together to optimize both models.
dr = DoubleRosen(np.random.normal(size=2), np.random.normal(size=2))
dr.checkgrad(verbose=1)
dr.r1.constrain_negative()
dr.r1.x[[0]].fix()
dr.r2.x[[1]].constrain_bounded(-30, 5)
dr.r2.x[[0]].constrain_positive()
dr
dr.r2.checkgrad(verbose=1)
dr.r1
print(dr.constraints)
print(dr.r2.constraints)
dr.param_array
dr.r2.param_array
print(dr.param_array)
print(dr.optimizer_array)
dr._transform_gradients(dr.gradient)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: Benchmarking our implementation
Step6: <div style="background-color
Step7: <div style="background-color
Step8: <div style="background-color
Step9: <div style="background-color
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
from matplotlib import rcParams
rcParams["savefig.dpi"] = 100
rcParams["figure.dpi"] = 100
rcParams["figure.figsize"] = 12, 4
rcParams["font.size"] = 16
rcParams["text.usetex"] = False
rcParams["font.family"] = ["sans-serif"]
rcParams["font.sans-serif"] = ["cmss10"]
rcParams["axes.unicode_minus"] = False
# https://github.com/matplotlib/matplotlib/issues/12039
try:
old_get_unicode_index
except NameError:
print('Patching matplotlib.mathtext.get_unicode_index')
import matplotlib.mathtext as mathtext
old_get_unicode_index = mathtext.get_unicode_index
mathtext.get_unicode_index = lambda symbol, math=True:\
ord('-') if symbol == '-' else old_get_unicode_index(symbol, math)
import numpy as np
from scipy.linalg import cho_factor
def ExpSquaredKernel(t1, t2=None, A=1.0, l=1.0):
    """Return the ``N x M`` exponential squared covariance matrix between
    time vectors `t1` and `t2`.

    k(t1_i, t2_j) = A^2 exp(-0.5 (t1_i - t2_j)^2 / l^2)

    Args:
        t1: length-N time vector.
        t2: length-M time vector; defaults to ``t1``.
        A: kernel amplitude.
        l: kernel lengthscale.
    """
    if t2 is None:
        t2 = t1
    # meshgrid so entry (i, j) pairs t1[i] with t2[j].
    T2, T1 = np.meshgrid(t2, t1)
    return A ** 2 * np.exp(-0.5 * (T1 - T2) ** 2 / l ** 2)
def ln_gp_likelihood(t, y, sigma=0, A=1.0, l=1.0):
    """Return the log of the GP marginal likelihood of the data `y(t)` given
    uncertainty `sigma` and an Exponential Squared Kernel with amplitude `A`
    and lengthscale `l`.
    """
    # The covariance and its determinant
    npts = len(t)
    kernel = ExpSquaredKernel
    K = kernel(t, A=A, l=l) + sigma ** 2 * np.eye(npts)
    # The marginal log likelihood: -0.5 y^T K^-1 y - 0.5 log|K| - (n/2) log 2pi
    log_like = -0.5 * np.dot(y.T, np.linalg.solve(K, y))
    log_like -= 0.5 * np.linalg.slogdet(K)[1]
    log_like -= 0.5 * npts * np.log(2 * np.pi)
    return log_like
def draw_from_gaussian(mu, S, ndraws=1, eps=1e-12):
    """Generate samples from a multivariate Gaussian N(mu, S).

    Args:
        mu: length-N mean vector.
        S: ``N x N`` covariance matrix.
        ndraws: number of samples to draw.
        eps: diagonal jitter for numerical stability of the factorization.

    Returns:
        ``(ndraws, N)`` array of samples.
    """
    npts = S.shape[0]
    # Cholesky factor L with S = L L^T; x = L u + mu transforms standard
    # normal draws u into draws from N(mu, S).
    L, _ = cho_factor(S + eps * np.eye(npts), lower=True)
    L = np.tril(L)
    u = np.random.randn(npts, ndraws)
    x = np.dot(L, u) + mu[:, None]
    return x.T
def compute_gp(t_train, y_train, t_test, sigma=0, A=1.0, l=1.0):
    """Compute the GP posterior mean vector and covariance matrix at `t_test`
    given training points `y_train(t_train)`.

    The training points have uncertainty `sigma` and the kernel is an
    Exponential Squared Kernel with amplitude `A` and lengthscale `l`.
    """
    # Compute the required matrices.
    # BUG FIX: the kernel hyperparameters were hard-coded to A=1.0, l=1.0,
    # silently ignoring the `A` and `l` arguments; forward them instead.
    kernel = ExpSquaredKernel
    Stt = kernel(t_train, A=A, l=l)
    Stt += sigma ** 2 * np.eye(Stt.shape[0])
    Spp = kernel(t_test, A=A, l=l)
    Spt = kernel(t_test, t_train, A=A, l=l)
    # Standard GP conditioning equations for the posterior mean and covariance.
    mu = np.dot(Spt, np.linalg.solve(Stt, y_train))
    S = Spp - np.dot(Spt, np.linalg.solve(Stt, Spt.T))
    return mu, S
%%time
np.random.seed(3)
t = np.linspace(0, 10, 10000)
sigma = np.ones_like(t) * 0.05
gp_mu, gp_S = compute_gp([], [], t, A=1.0, l=1.0)
y = draw_from_gaussian(gp_mu, gp_S)[0] + sigma * np.random.randn(len(t))
%%time
ln_gp_likelihood(t, y, sigma)
import george
%%time
kernel = george.kernels.ExpSquaredKernel(1.0)
gp = george.GP(kernel)
gp.compute(t, sigma)
%%time
print(gp.log_likelihood(y))
%%time
gp.sample()
%%time
gp = george.GP(kernel, solver=george.HODLRSolver)
gp.compute(t, sigma)
%%time
gp.log_likelihood(y)
import celerite
from celerite import terms
%%time
kernel = terms.Matern32Term(np.log(1), np.log(1))
gp = celerite.GP(kernel)
gp.compute(t, sigma)
%%time
gp.log_likelihood(y)
%%time
gp.sample()
import matplotlib.pyplot as plt
from celerite.modeling import Model
import os
# Define the model
class MeanModel(Model):
    """Gaussian-shaped transit dip mean model for celerite:
    -depth * exp(-0.5 (t - t0)^2 / (0.2 dur)^2)."""
    parameter_names = ("depth", "t0", "dur")
    def get_value(self, t):
        # Negative Gaussian; the width is 0.2*dur so `dur` roughly spans the dip.
        return -self.depth * np.exp(-0.5 * (t - self.t0) ** 2 / (0.2 * self.dur) ** 2)
mean_model = MeanModel(depth=0.5, t0=0.05, dur=0.7)
mean_model.parameter_bounds = [(0, 1.0), (-0.1, 0.4), (0.1, 1.0)]
true_params = mean_model.get_parameter_vector()
# Simuate the data
np.random.seed(71)
x = np.sort(np.random.uniform(-1, 1, 70))
yerr = np.random.uniform(0.075, 0.1, len(x))
K = 0.2 * np.exp(-0.5 * (x[:, None] - x[None, :]) ** 2 / 10.5)
K[np.diag_indices(len(x))] += yerr ** 2
y = np.random.multivariate_normal(mean_model.get_value(x), K)
y -= np.nanmedian(y)
# Plot the data
plt.errorbar(x, y, yerr=yerr, fmt=".k", capsize=0)
t = np.linspace(-1, 1, 1000)
plt.plot(t, mean_model.get_value(t))
plt.ylabel(r"$y$")
plt.xlabel(r"$t$")
plt.xlim(-1, 1)
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(5))
plt.title("simulated data");
# Save it
X = np.hstack((x.reshape(-1, 1), y.reshape(-1, 1), yerr.reshape(-1, 1)))
if not (os.path.exists("data")):
os.mkdir("data")
np.savetxt("data/sample_transit.txt", X)
import matplotlib.pyplot as plt
t, y, yerr = np.loadtxt("data/sample_transit.txt", unpack=True)
plt.errorbar(x, y, yerr=yerr, fmt=".k", capsize=0)
plt.xlabel("time")
plt.ylabel("relative flux");
from celerite.modeling import Model
from scipy.optimize import minimize
# Define the transit model as a celerite `Model`
class MeanModel(Model):
    """Gaussian-shaped transit dip mean model (same definition as the data
    cell above, repeated here for the fitting cell)."""
    parameter_names = ("depth", "t0", "dur")
    def get_value(self, t):
        return -self.depth * np.exp(-0.5 * (t - self.t0) ** 2 / (0.2 * self.dur) ** 2)
# Instantiate it with some guesses (which are actually the true values in this case!)
mean_model = MeanModel(depth=0.5, t0=0.05, dur=0.7)
mean_model.parameter_bounds = [(0, 1.0), (-0.1, 0.4), (0.1, 1.0)]
true_params = mean_model.get_parameter_vector()
# Set up the GP model
kernel = terms.RealTerm(log_a=np.log(np.var(y)), log_c=0)
gp = celerite.GP(kernel, mean=mean_model, fit_mean=True)
gp.compute(x, yerr)
print("Initial log-likelihood: {0}".format(gp.log_likelihood(y)))
# Define a cost function
def neg_log_like(params, y, gp):
    # Negative GP log-likelihood of `y` at parameter vector `params`
    # (cost function for scipy.optimize.minimize).
    gp.set_parameter_vector(params)
    return -gp.log_likelihood(y)
def grad_neg_log_like(params, y, gp):
    # Gradient of neg_log_like w.r.t. `params` (element [1] of celerite's
    # grad_log_likelihood return value is the gradient vector).
    gp.set_parameter_vector(params)
    return -gp.grad_log_likelihood(y)[1]
# Fit for the maximum likelihood parameters
initial_params = gp.get_parameter_vector()
bounds = gp.get_parameter_bounds()
soln = minimize(neg_log_like, initial_params,
method="L-BFGS-B", bounds=bounds, args=(y, gp))
gp.set_parameter_vector(soln.x)
print("Final log-likelihood: {0}".format(-soln.fun))
# Make the maximum likelihood prediction
t = np.linspace(-1, 1, 500)
mu, var = gp.predict(y, t, return_var=True)
std = np.sqrt(var)
# Plot the data
color = "#ff7f0e"
plt.errorbar(x, y, yerr=yerr, fmt=".k", capsize=0)
plt.plot(t, mu, color=color)
plt.fill_between(t, mu+std, mu-std, color=color, alpha=0.3, edgecolor="none")
plt.ylabel(r"$y$")
plt.xlabel(r"$t$")
plt.xlim(-1, 1)
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(5))
plt.title("maximum likelihood prediction");
def log_probability(params):
    # Log-posterior for emcee: prior + GP likelihood; -inf rejects the sample.
    # Uses the module-level `gp` and data `y`.
    gp.set_parameter_vector(params)
    lp = gp.log_prior()
    if not np.isfinite(lp):
        return -np.inf
    try:
        return gp.log_likelihood(y) + lp
    except celerite.solver.LinAlgError:
        # Kernel matrix not positive definite for these parameters.
        return -np.inf
import emcee
initial = np.array(soln.x)
ndim, nwalkers = len(initial), 32
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability)
print("Running burn-in...")
p0 = initial + 1e-8 * np.random.randn(nwalkers, ndim)
p0, lp, _ = sampler.run_mcmc(p0, 1000)
print("Running production...")
sampler.reset()
sampler.run_mcmc(p0, 2000);
# Plot the data.
plt.errorbar(x, y, yerr=yerr, fmt=".k", capsize=0)
# Plot 24 posterior samples.
samples = sampler.flatchain
for s in samples[np.random.randint(len(samples), size=24)]:
gp.set_parameter_vector(s)
mu = gp.predict(y, t, return_cov=False)
plt.plot(t, mu, color=color, alpha=0.3)
plt.ylabel(r"$y$")
plt.xlabel(r"$t$")
plt.xlim(-1, 1)
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(5))
plt.title("posterior predictions");
import corner
names = gp.get_parameter_names()
cols = mean_model.get_parameter_names()
inds = np.array([names.index("mean:"+k) for k in cols])
corner.corner(sampler.flatchain[:, inds], truths=true_params,
labels=[r"depth", r"$t_0$", r"dur"]);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: We can now proceed with loading the corpus.
Step3: Here's what a single document looks like.
Step4: Extract our documents and split into training/test sets.
Step5: Set-up Doc2Vec Training & Evaluation Models
Step6: Le and Mikolov note that combining a paragraph vector from Distributed Bag of
Step9: Predictive Evaluation Methods
Step10: Bulk Training & Per-Model Evaluation
Step11: Achieved Sentiment-Prediction Accuracy
Step12: In our testing, contrary to the results of the paper, on this problem,
Step13: (Yes, here the stored vector from 20 epochs of training is usually one of the
Step14: Somewhat, in terms of reviewer tone, movie genre, etc... the MOST
Step15: Do the DBOW words look meaningless? That's because the gensim DBOW model
|
<ASSISTANT_TASK:>
Python Code:
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
import collections
SentimentDocument = collections.namedtuple('SentimentDocument', 'words tags split sentiment')
import io
import re
import tarfile
import os.path
import smart_open
import gensim.utils
def download_dataset(url='http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'):
    # Download the IMDB sentiment archive into the working directory (skipped
    # when the file already exists) and return its local filename.
    fname = url.split('/')[-1]
    if os.path.isfile(fname):
        return fname
    # Download the file to local storage first.
    with smart_open.open(url, "rb", ignore_ext=True) as fin:
        with smart_open.open(fname, 'wb', ignore_ext=True) as fout:
            while True:
                buf = fin.read(io.DEFAULT_BUFFER_SIZE)
                if not buf:
                    break
                fout.write(buf)
    return fname
def create_sentiment_document(name, text, index):
    """Build a SentimentDocument from an archive member path and its text.

    `name` looks like 'aclImdb/<split>/<pos|neg|unsup>/<id>.txt'.  Positive
    reviews get sentiment 1.0, negative 0.0; unsupervised reviews get no
    sentiment and are routed to the 'extra' split.
    """
    _, split, sentiment_str, _ = name.split('/')
    label_map = {'pos': 1.0, 'neg': 0.0, 'unsup': None}
    sentiment = label_map[sentiment_str]
    if sentiment is None:
        split = 'extra'
    words = gensim.utils.to_unicode(text).split()
    return SentimentDocument(words, [index], split, sentiment)
def extract_documents():
    # Stream SentimentDocuments out of the downloaded tarball.  Only the
    # train/test pos|neg|unsup review files are yielded, indexed sequentially.
    fname = download_dataset()
    index = 0
    with tarfile.open(fname, mode='r:gz') as tar:
        for member in tar.getmembers():
            if re.match(r'aclImdb/(train|test)/(pos|neg|unsup)/\d+_\d+.txt$', member.name):
                member_bytes = tar.extractfile(member).read()
                member_text = member_bytes.decode('utf-8', errors='replace')
                # Reviews are stored one-per-file with no embedded newlines.
                assert member_text.count('\n') == 0
                yield create_sentiment_document(member.name, member_text, index)
                index += 1
print(alldocs[27])
train_docs = [doc for doc in alldocs if doc.split == 'train']
test_docs = [doc for doc in alldocs if doc.split == 'test']
print(f'{len(alldocs)} docs: {len(train_docs)} train-sentiment, {len(test_docs)} test-sentiment')
import multiprocessing
from collections import OrderedDict
import gensim.models.doc2vec
assert gensim.models.doc2vec.FAST_VERSION > -1, "This will be painfully slow otherwise"
from gensim.models.doc2vec import Doc2Vec
common_kwargs = dict(
vector_size=100, epochs=20, min_count=2,
sample=0, workers=multiprocessing.cpu_count(), negative=5, hs=0,
)
simple_models = [
# PV-DBOW plain
Doc2Vec(dm=0, **common_kwargs),
# PV-DM w/ default averaging; a higher starting alpha may improve CBOW/PV-DM modes
Doc2Vec(dm=1, window=10, alpha=0.05, comment='alpha=0.05', **common_kwargs),
# PV-DM w/ concatenation - big, slow, experimental mode
# window=5 (both sides) approximates paper's apparent 10-word total window size
Doc2Vec(dm=1, dm_concat=1, window=5, **common_kwargs),
]
for model in simple_models:
model.build_vocab(alldocs)
print(f"{model} vocabulary scanned & state initialized")
models_by_name = OrderedDict((str(model), model) for model in simple_models)
from gensim.test.test_doc2vec import ConcatenatedDoc2Vec
models_by_name['dbow+dmm'] = ConcatenatedDoc2Vec([simple_models[0], simple_models[1]])
models_by_name['dbow+dmc'] = ConcatenatedDoc2Vec([simple_models[0], simple_models[2]])
import numpy as np
import statsmodels.api as sm
from random import sample
def logistic_predictor_from_data(train_targets, train_regressors):
    """Fit a statsmodels logistic predictor on the supplied data.

    The summary line below was a bare (unquoted) text line -- a SyntaxError --
    apparently a docstring whose triple quotes were lost; restored as a docstring.
    """
    logit = sm.Logit(train_targets, train_regressors)
    predictor = logit.fit(disp=0)  # disp=0 silences the optimizer's console output
    # print(predictor.summary())
    return predictor
def error_rate_for_model(test_model, train_set, test_set):
    """Report the error rate on test-set sentiments for the given Doc2Vec model.

    Uses the model's stored document vectors as features, fits a logistic
    regression on train_set, and scores it on test_set.
    Returns (error_rate, n_errors, n_predictions, fitted_predictor).

    (The original summary line was a bare unquoted string -- a SyntaxError --
    restored here as a proper docstring.)
    """
    train_targets = [doc.sentiment for doc in train_set]
    train_regressors = [test_model.dv[doc.tags[0]] for doc in train_set]
    train_regressors = sm.add_constant(train_regressors)  # add intercept column
    predictor = logistic_predictor_from_data(train_targets, train_regressors)
    test_regressors = [test_model.dv[doc.tags[0]] for doc in test_set]
    test_regressors = sm.add_constant(test_regressors)
    # Predict & evaluate: round probabilities to 0/1 and compare to true labels.
    test_predictions = predictor.predict(test_regressors)
    corrects = sum(np.rint(test_predictions) == [doc.sentiment for doc in test_set])
    errors = len(test_predictions) - corrects
    error_rate = float(errors) / len(test_predictions)
    return (error_rate, errors, len(test_predictions), predictor)
from collections import defaultdict
error_rates = defaultdict(lambda: 1.0) # To selectively print only best errors achieved
from random import shuffle
shuffled_alldocs = alldocs[:]
shuffle(shuffled_alldocs)
for model in simple_models:
print(f"Training {model}")
model.train(shuffled_alldocs, total_examples=len(shuffled_alldocs), epochs=model.epochs)
print(f"\nEvaluating {model}")
err_rate, err_count, test_count, predictor = error_rate_for_model(model, train_docs, test_docs)
error_rates[str(model)] = err_rate
print("\n%f %s\n" % (err_rate, model))
for model in [models_by_name['dbow+dmm'], models_by_name['dbow+dmc']]:
print(f"\nEvaluating {model}")
err_rate, err_count, test_count, predictor = error_rate_for_model(model, train_docs, test_docs)
error_rates[str(model)] = err_rate
print(f"\n{err_rate} {model}\n")
print("Err_rate Model")
for rate, name in sorted((rate, name) for name, rate in error_rates.items()):
print(f"{rate} {name}")
doc_id = np.random.randint(len(simple_models[0].dv)) # Pick random doc; re-run cell for more examples
print(f'for doc {doc_id}...')
for model in simple_models:
inferred_docvec = model.infer_vector(alldocs[doc_id].words)
print(f'{model}:\n {model.dv.most_similar([inferred_docvec], topn=3)}')
import random
doc_id = np.random.randint(len(simple_models[0].dv))  # pick random doc, re-run cell for more examples
model = random.choice(simple_models)  # and a random model
sims = model.dv.most_similar(doc_id, topn=len(model.dv))  # get *all* similar documents
print(f'TARGET ({doc_id}): «{" ".join(alldocs[doc_id].words)}»\n')
# BUG FIX: a stray '%s' (leftover from %-formatting) printed literally
# inside this f-string; f-strings do not interpret %-placeholders.
print(f'SIMILAR/DISSIMILAR DOCS PER MODEL {model}:\n')
for label, index in [('MOST', 0), ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:
    s = sims[index]   # (doc_id, cosine similarity) pair
    i = s[0]          # reuse s instead of indexing sims twice
    words = ' '.join(alldocs[i].words)
    print(f'{label} {s}: «{words}»\n')
import random
word_models = simple_models[:]
def pick_random_word(model, threshold=10):
    """Return a random vocabulary word whose corpus count exceeds *threshold*."""
    vocab = model.wv.index_to_key
    candidate = random.choice(vocab)
    # Keep resampling until a sufficiently frequent word turns up.
    while model.wv.get_vecattr(candidate, "count") <= threshold:
        candidate = random.choice(vocab)
    return candidate
target_word = pick_random_word(word_models[0])
# or uncomment below line, to just pick a word from the relevant domain:
# target_word = 'comedy/drama'
for model in word_models:
print(f'target_word: {repr(target_word)} model: {model} similar words:')
for i, (word, sim) in enumerate(model.wv.most_similar(target_word, topn=10), 1):
print(f' {i}. {sim:.2f} {repr(word)}')
print()
from gensim.test.utils import datapath
questions_filename = datapath('questions-words.txt')
# Note: this analysis takes many minutes
for model in word_models:
    score, sections = model.wv.evaluate_word_analogies(questions_filename)
    # The final section is the aggregate 'total' bucket over all categories.
    correct, incorrect = len(sections[-1]['correct']), len(sections[-1]['incorrect'])
    # BUG FIX: '%%' inside an f-string prints a literal double percent
    # (f-strings don't process %-escapes), and the closing ')' was missing.
    print(f'{model}: {float(correct*100)/(correct+incorrect):0.2f}% correct ({correct} of {correct+incorrect})')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Detect Beats
Step2: Load a fictional reference annotation.
Step3: Plot the estimated and reference beats together.
Step4: Evaluate
Step5: Example
Step6: Hidden benefits
Step7: Common plots
Step8: Example
|
<ASSISTANT_TASK:>
Python Code:
y, sr = librosa.load('audio/prelude_cmaj.wav')
ipd.Audio(y, rate=sr)
est_tempo, est_beats = librosa.beat.beat_track(y=y, sr=sr, bpm=120)
est_beats = librosa.frames_to_time(est_beats, sr=sr)
est_beats
ref_beats = numpy.array([0, 0.50, 1.02, 1.53, 1.99, 2.48, 2.97,
3.43, 3.90, 4.41, 4.89, 5.38,
5.85, 6.33, 6.82, 7.29, 7.70])
D = librosa.stft(y)
S = abs(D)
S_db = librosa.amplitude_to_db(S)
librosa.display.specshow(S_db, sr=sr, x_axis='time', y_axis='log')
plt.ylim(0, 8192)
plt.vlines(est_beats, 0, 8192, color='#00ff00')
plt.scatter(ref_beats, 5000*numpy.ones_like(ref_beats), color='k', s=100)
mir_eval.beat.evaluate(ref_beats, est_beats)
mir_eval.chord.evaluate()
import librosa.display
import mir_eval.display
librosa.display.specshow(S, x_axis='time', y_axis='mel')
mir_eval.display.events(ref_beats, color='w', alpha=0.8, linewidth=3)
mir_eval.display.events(est_beats, color='c', alpha=0.8, linewidth=3, linestyle='--')
y_harm, y_perc = librosa.effects.hpss(y, margin=8)
plt.figure(figsize=(12, 4))
mir_eval.display.separation([y_perc, y_harm], sr, labels=['percussive', 'harmonic'])
plt.legend()
Audio(data=numpy.vstack([
mir_eval.sonify.chords()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Bootstrap
Step2: Jackknife
Step3: Hypothesis Testing
Step4: Benjamini and Hochberg Method
Step5: 4.7 - Comparing Distributions
Step6: U test -
Step7: Parametric Methods
Step8: In order the use the method, we must determine if f(x,y) is separable, that is $f(x,y) = \psi(x)\rho(y)$. To do this, we follow the following procedure
Step9: Since $\tau \lt 1$, we see that x and y are independent. We now define the cumulative functions $\Phi(x) = \int\limits_{-\infty}^{x} \psi(x') dx'$ and $\Sigma(y) = \int\limits_{-\infty}^{y} \rho(y') dy'$. The Lyndel-Bell paper showed that $\Phi(x_i) = \Phi(x_1) \prod\limits_{k=2}^{i} (1+1/N_k)$, defined on a grid of unequal spacing given by {$x_i$}. Here we require {$x_i$} to be sorted.
Step10: To get the differential distribution function, we interpolate and bin along the x axis
Step11: To find the distribution in y, we find $J_k$ such that every point in the set has $x_j \lt x_{max,i}$ and $y_j \lt y_{i}$, and the counts $M_k$ in the $J_k$s
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import scipy.stats
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy.stats import norm
from matplotlib import pyplot as plt
from astroML.resample import bootstrap
from astroML.stats import sigmaG
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=15, usetex=True)
m = 1000 # number of points
n = 10000 # number of bootstraps
#------------------------------------------------------------
# sample values from a normal distribution
np.random.seed(123)
data = norm(0, 1).rvs(m)
#------------------------------------------------------------
# Compute bootstrap resamplings of data
mu1_bootstrap = bootstrap(data, n, np.std, kwargs=dict(axis=1, ddof=1))
mu2_bootstrap = bootstrap(data, n, sigmaG, kwargs=dict(axis=1))
#------------------------------------------------------------
# Compute the theoretical expectations for the two distributions
x = np.linspace(0.8, 1.2, 1000)
sigma1 = 1. / np.sqrt(2 * (m - 1))
pdf1 = norm(1, sigma1).pdf(x)
sigma2 = 1.06 / np.sqrt(m)
pdf2 = norm(1, sigma2).pdf(x)
#------------------------------------------------------------
# Plot the results
fig, ax = plt.subplots(figsize=(5*2, 3.75*2))
ax.hist(mu1_bootstrap, bins=50, normed=True, histtype='step',
color='blue', ls='dashed', label=r'$\sigma\ {\rm (std. dev.)}$')
ax.plot(x, pdf1, color='gray')
ax.hist(mu2_bootstrap, bins=50, normed=True, histtype='step',
color='red', label=r'$\sigma_G\ {\rm (quartile)}$')
ax.plot(x, pdf2, color='gray')
ax.set_xlim(0.82, 1.18)
ax.set_xlabel(r'$\sigma$',)
ax.set_ylabel(r'$p(\sigma|x,I)$')
ax.legend()
plt.show()
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy.stats import norm
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=15, usetex=True)
#------------------------------------------------------------
# sample values from a normal distribution
np.random.seed(123)
m = 1000 # number of points
data = norm(0, 1).rvs(m)
#------------------------------------------------------------
# Compute jackknife resamplings of data
from astroML.resample import jackknife
from astroML.stats import sigmaG
# mu1 is the mean of the standard-deviation-based width
mu1, sigma_mu1, mu1_raw = jackknife(data, np.std,
kwargs=dict(axis=1, ddof=1),
return_raw_distribution=True)
pdf1_theory = norm(1, 1. / np.sqrt(2 * (m - 1)))
pdf1_jackknife = norm(mu1, sigma_mu1)
# mu2 is the mean of the interquartile-based width
# WARNING: do not use the following in practice. This example
# shows that jackknife fails for rank-based statistics.
mu2, sigma_mu2, mu2_raw = jackknife(data, sigmaG,
kwargs=dict(axis=1),
return_raw_distribution=True)
pdf2_theory = norm(data.std(), 1.06 / np.sqrt(m))
pdf2_jackknife = norm(mu2, sigma_mu2)
print mu2, sigma_mu2
#------------------------------------------------------------
# plot the results
print "mu_1 mean: %.2f +- %.2f" % (mu1, sigma_mu1)
print "mu_2 mean: %.2f +- %.2f" % (mu2, sigma_mu2)
fig = plt.figure(figsize=(5*2, 2*2))
fig.subplots_adjust(left=0.11, right=0.95, bottom=0.2, top=0.9,
wspace=0.25)
ax = fig.add_subplot(121)
ax.hist(mu1_raw, np.linspace(0.996, 1.008, 100),
label=r'$\sigma^*\ {\rm (std.\ dev.)}$',
histtype='stepfilled', fc='white', normed=False)
ax.hist(mu2_raw, np.linspace(0.996, 1.008, 100),
label=r'$\sigma_G^*\ {\rm (quartile)}$',
histtype='stepfilled', fc='gray', normed=False)
ax.legend(loc='upper left', handlelength=2)
ax.xaxis.set_major_locator(plt.MultipleLocator(0.004))
ax.set_xlabel(r'$\sigma^*$')
ax.set_ylabel(r'$N(\sigma^*)$')
ax.set_xlim(0.998, 1.008)
ax.set_ylim(0, 550)
ax = fig.add_subplot(122)
x = np.linspace(0.45, 1.15, 1000)
ax.plot(x, pdf1_jackknife.pdf(x),
color='blue', ls='dashed', label=r'$\sigma\ {\rm (std.\ dev.)}$',
zorder=2)
ax.plot(x, pdf1_theory.pdf(x), color='gray', zorder=1)
ax.plot(x, pdf2_jackknife.pdf(x),
color='red', label=r'$\sigma_G\ {\rm (quartile)}$', zorder=2)
ax.plot(x, pdf2_theory.pdf(x), color='gray', zorder=1, label='Theory')
plt.legend(loc='upper left', handlelength=2)
ax.set_xlabel(r'$\sigma$')
ax.set_ylabel(r'$p(\sigma|x,I)$')
ax.set_xlim(0.45, 1.15)
ax.set_ylim(0, 24)
plt.show()
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy.stats import norm
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=15, usetex=True)
#------------------------------------------------------------
# Generate and draw the curves
x = np.linspace(50, 200, 1000)
p1 = 0.9 * norm(100, 10).pdf(x)
p2 = 0.1 * norm(150, 12).pdf(x)
fig, ax = plt.subplots(figsize=(5*2, 3.75*2))
ax.fill(x, p1, ec='k', fc='#AAAAAA', alpha=0.5)
ax.fill(x, p2, '-k', fc='#AAAAAA', alpha=0.5)
ax.plot([120, 120], [0.0, 0.04], '--k')
ax.text(100, 0.036, r'$h_B(x)$', ha='center', va='bottom')
ax.text(150, 0.0035, r'$h_S(x)$', ha='center', va='bottom')
ax.text(122, 0.039, r'$x_c=120$', ha='left', va='top')
ax.text(125, 0.01, r'$(x > x_c\ {\rm classified\ as\ sources})$')
ax.set_xlim(50, 200)
ax.set_ylim(0, 0.04)
ax.set_xlabel('$x$')
ax.set_ylabel('$p(x)$')
plt.show()
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy.stats import norm
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=15, usetex=True)
#------------------------------------------------------------
# Set up the background and foreground distributions
background = norm(100, 10)
foreground = norm(150, 12)
f = 0.1
# Draw N samples from the two-component mixture: a fraction f comes from the
# foreground distribution, the remainder from the background.
N = 1000000  # int, not 1E6: np.random.random rejects float sizes in modern NumPy
X = np.random.random(N)
mask = (X < f)  # use the declared mixture fraction instead of a magic 0.1
X[mask] = foreground.rvs(np.sum(mask))
X[~mask] = background.rvs(np.sum(~mask))
#------------------------------------------------------------
# Perform Benjamini-Hochberg method
p = 1 - background.cdf(X)
p_sorted = np.sort(p)
#------------------------------------------------------------
# plot the results
fig = plt.figure(figsize=(5*2, 3.75*2))
fig.subplots_adjust(bottom=0.15)
ax = plt.axes(xscale='log', yscale='log')
# only plot every 1000th; plotting all 1E6 takes too long
ax.plot(p_sorted[::1000], np.linspace(0, 1, 1000), '-k')
ax.plot(p_sorted[::1000], p_sorted[::1000], ':k', lw=1)
# plot the cutoffs for various values of expsilon
p_reg_over_eps = 10 ** np.linspace(-3, 0, 100)
for (i, epsilon) in enumerate([0.1, 0.01, 0.001, 0.0001]):
x = p_reg_over_eps * epsilon
y = p_reg_over_eps
ax.plot(x, y, '--k')
ax.text(x[1], y[1],
r'$\epsilon = %.1g$' % epsilon,
ha='center', va='bottom', rotation=70)
ax.xaxis.set_major_locator(plt.LogLocator(base=100))
ax.set_xlim(1E-12, 1)
ax.set_ylim(1E-3, 1)
ax.set_xlabel('$p = 1 - H_B(i)$')
ax.set_ylabel('normalized $C(p)$')
plt.show()
#1-sample KS test
N1 = 100
vals1 = np.random.normal(loc = 0,scale = 1,size = N1)
x1 = np.sort(vals1);
y1 = np.arange(0.,N1)/N1
plt.figure(figsize = (10,10))
plt.plot(x1,y1,'b-',lw = 3)
D,p = scipy.stats.kstest(vals1,"norm")
plt.text(-3,0.9,'D= '+str(D)[:5],fontsize = 24)
plt.text(-3,0.8,'p= '+str(p)[:5],fontsize = 24)
plt.xlim(-3.5,3.5);
#2 sample KS test:
#drawing from a normal distribution
N1 = 1000
vals1 = np.random.normal(loc = 0,scale = 1,size = N1)
x1 = np.sort(vals1)
y1 = np.arange(0.,N1)/N1
#drawing from a uniform distribution
N2 = 1000
vals2 = np.random.rand(N2)*4-2
x2 = np.sort(vals2)
y2 = np.arange(0.,N2)/N2
#plotting and KS test
plt.figure(figsize = (10,10))
plt.plot(x1,y1,'b-',lw = 3)
plt.plot(x2,y2,'g--',lw = 3)
D,p = scipy.stats.ks_2samp(vals1,vals2)
plt.text(-3,0.9,'D= '+str(D)[:5],fontsize = 24)
if str(p)[-4]=='e':
plt.text(-3,0.8,'p= '+str(p)[:4]+str(p)[-4:],fontsize = 24)
else:
plt.text(-3,0.8,'p= '+str(p)[:6],fontsize = 24)
plt.xlim(-3.5,3.5);
#Drawing from a GMM
from sklearn.mixture import GMM
N1=1000
np.random.seed(1)
gmm = GMM(3, n_iter=1)
gmm.means_ = np.array([[-1], [0], [1.5]])
gmm.covars_ = np.array([[1.5], [1], [0.5]]) ** 2
gmm.weights_ = np.array([0.1, 0.8, 0.1])
vals1 = gmm.sample(N1).T[0]
x1 = np.sort(vals1)
y1 = np.arange(0.,N1)/N1
#Drawing from a normal distribution
N2 = 100000
vals2 = np.random.normal(loc = 0,scale = 1,size = N2)
x2 = np.sort(vals2)
y2 = np.arange(0.,N2)/N2
#plotting and KS test
plt.figure(figsize = (10,10))
plt.plot(x1,y1,'b-',lw = 3)
plt.plot(x2,y2,'g--',lw = 3)
D,p = scipy.stats.ks_2samp(vals1,vals2)
plt.text(-3,0.9,'D= '+str(D)[:5],fontsize = 24)
if str(p)[-4]=='e':
plt.text(-3,0.8,'p= '+str(p)[:4]+str(p)[-4:],fontsize = 24)
else:
plt.text(-3,0.8,'p= '+str(p)[:6],fontsize = 24)
plt.xlim(-3.5,3.5);
#Drawing from a GMM
from sklearn.mixture import GMM
N1=100
np.random.seed(1)
gmm = GMM(3, n_iter=1)
gmm.means_ = np.array([[-1], [0.5], [1.5]])
gmm.covars_ = np.array([[1.5], [1], [0.5]]) ** 2
gmm.weights_ = np.array([0.1, 0.8, 0.1])
vals1 = gmm.sample(N1).T[0]
x1 = np.sort(vals1)
y1 = np.arange(0.,N1)/N1
#Drawing from a normal distribution
N2 = 100
vals2 = np.random.normal(loc = 0,scale = 1,size = N2)
x2 = np.sort(vals2)
y2 = np.arange(0.,N2)/N2
#plotting and U test
plt.figure(figsize = (10,10))
plt.plot(x1,y1,'b-',lw = 3)
plt.plot(x2,y2,'g--',lw = 3)
U,p = scipy.stats.mannwhitneyu(vals2,vals1)
s = str(U)
s1 = s.index('.')
plt.text(-3,0.9,'U= '+str(U)[:s1+2],fontsize = 24)
plt.text(-3,0.8,r'$\mu_U$ = '+str(N1*N2/2),fontsize = 24)
if str(p)[-4]=='e':
plt.text(-3,0.7,'p= '+str(p)[:4]+str(p)[-4:],fontsize = 24)
else:
plt.text(-3,0.7,'p= '+str(p)[:6],fontsize = 24)
plt.xlim(-3.5,3.5);
x_true = np.random.normal(5,3,10000)
y_true = np.random.normal(5,4,10000)
plt.figure(figsize=(10,10))
plt.plot(x_true,y_true,'k,')
plt.xlim(-10,20)
plt.ylim(-15,25)
plt.title('True Distribution')
selection_fn = y_true<12-x_true
x=x_true[selection_fn]
y=y_true[selection_fn]
plt.figure(figsize=(10,10))
plt.plot(x,y,'k,')
plt.xlim(-10,20)
plt.ylim(-15,25)
plt.title('Observed Distribution')
R,N = [],[]
for i in range(len(x)):
y_max = 12-x[i]
sel_J = np.array([(x[ind] <= x[i])&(y[ind] < y_max) for ind in range(len(x))])
x_j,y_j = x[sel_J],y[sel_J]
if i ==0:
plt.figure(figsize=(10,10))
plt.plot(x_j,y_j,'k,')
plt.scatter([x[i]],[y[i]],s=49)
plt.xlim(-10,20)
plt.ylim(-15,25)
y_js = np.sort(y_j)
R_i = list(y_js).index(y[i])+1
N_i = len(y_js)
R.append(R_i)
N.append(N_i)
tau = sum(np.array(R)*1. -np.array(N)/2.)/np.sqrt(sum((np.array(N)**2)/12))
print tau
argy=argsort(x)
x_s =x[argy]
N_s =np.array(N)[argy]
Nk=1.+1./N_s
Nk[0] = 1
phi = np.array([prod(Nk[:i]) for i in range(len(Nk))])
phi = phi/phi[-1]
#for i in np.arange(len(x)-1)+1:
plt.figure(figsize = (10,10))
plt.plot(x_s,phi)
plt.xlabel('x');
plt.ylabel(r'$\Phi$');
yp = np.arange(0,1,.0001)
xp = np.interp(yp,phi,x_s)
plt.figure(figsize = (10,10))
plt.hist(xp,normed = 1,histtype = 'step',label = 'Lyndel-Bell $C^-$',bins = 20,lw=3);
plt.hist(x,normed = 1,histtype = 'step',label = 'Observed',bins =20,lw=3);
plt.hist(x_true,normed = 1,histtype = 'step',label = 'True',bins =20,lw=3);
plt.xlabel('x');
plt.legend(loc = 2);
Rk,M = [],[]
for i in range(len(y)):
x_max = 12-y[i]
sel_J = np.array([(x[ind] <= x_max)&(y[ind] < y[i]) for ind in range(len(y))])
x_j,y_j = x[sel_J],y[sel_J]
if i ==0:
plt.figure(figsize=(10,10))
plt.plot(x_j,y_j,'k,')
plt.scatter([x[i]],[y[i]],s=49)
plt.xlim(-10,20)
plt.ylim(-15,25)
M_k = len(y_j)
M.append(M_k)
# Sort the y values and their neighbour counts, then build the Lynden-Bell
# cumulative distribution Sigma(y) as a running product.
argy=argsort(y)
y_s =y[argy]
M_s =np.array(M)[argy]
# NOTE(review): any zero in M_s would divide by zero here -- presumably the
# selection guarantees at least one comparable point; confirm.
Mk=1.+1./M_s
Mk[0] = 1
# BUG FIX: the cumulative product iterated over len(Nk) (the x-direction
# counts from the Phi computation); it only worked because len(Nk) happens
# to equal len(Mk). Iterate over Mk's own length.
sigma= np.array([prod(Mk[:i]) for i in range(len(Mk))])
sigma = sigma/sigma[-1]
#for i in np.arange(len(x)-1)+1:
plt.figure(figsize = (10,10))
plt.plot(y_s,sigma)
plt.xlabel('y');
plt.ylabel(r'$\Sigma$');
yp = np.arange(0,1,.0001)
xp = np.interp(yp,sigma,y_s)
plt.figure(figsize = (10,10))
plt.hist(xp,normed = 1,histtype = 'step',label = 'Lyndel-Bell $C^-$',bins = 20,lw=3);
plt.hist(y,normed = 1,histtype = 'step',label = 'Observed',bins =20,lw=3);
plt.hist(y_true,normed = 1,histtype = 'step',label = 'True',bins =20,lw=3);
plt.xlabel('y');
plt.legend(loc = 2);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: Lab Task #1
Step3: The source dataset
Step4: Create the training and evaluation data tables
Step5: Lab Task #3
Step6: Lab Task #4
Step7: Split augmented dataset into eval dataset
Step8: Verify table creation
Step9: Lab Task #5
Step10: Verify CSV creation
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
!pip install --user google-cloud-bigquery==1.25.0
import os
from google.cloud import bigquery
%%bash
export PROJECT=$(gcloud config list project --format "value(core.project)")
echo "Your current GCP Project Name is: "$PROJECT
# TODO: Change environment variables
PROJECT = "cloud-training-demos" # REPLACE WITH YOUR PROJECT NAME
BUCKET = "BUCKET" # REPLACE WITH YOUR BUCKET NAME, DEFAULT BUCKET WILL BE PROJECT ID
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# Do not change these
os.environ["BUCKET"] = PROJECT if BUCKET == "BUCKET" else BUCKET # DEFAULT BUCKET WILL BE PROJECT ID
os.environ["REGION"] = REGION
if PROJECT == "cloud-training-demos":
print("Don't forget to update your PROJECT name! Currently:", PROJECT)
%%bash
## Create a BigQuery dataset for babyweight if it doesn't exist
datasetexists=$(bq ls -d | grep -w # TODO: Add dataset name)
if [ -n "$datasetexists" ]; then
echo -e "BigQuery dataset already exists, let's not recreate it."
else
echo "Creating BigQuery dataset titled: babyweight"
bq --location=US mk --dataset \
--description "Babyweight" \
$PROJECT:# TODO: Add dataset name
echo "Here are your current datasets:"
bq ls
fi
## Create GCS bucket if it doesn't exist already...
exists=$(gsutil ls -d | grep -w gs://${BUCKET}/)
if [ -n "$exists" ]; then
echo -e "Bucket exists, let's not recreate it."
else
echo "Creating a new GCS bucket."
gsutil mb -l ${REGION} gs://${BUCKET}
echo "Here are your current buckets:"
gsutil ls
fi
%%bigquery
CREATE OR REPLACE TABLE
babyweight.babyweight_data AS
SELECT
# TODO: Add selected raw features and preprocessed features
FROM
publicdata.samples.natality
WHERE
# TODO: Add filters
%%bigquery
CREATE OR REPLACE TABLE
babyweight.babyweight_augmented_data AS
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks,
hashmonth
FROM
babyweight.babyweight_data
UNION ALL
SELECT
# TODO: Replace is_male and plurality as indicated above
FROM
babyweight.babyweight_data
%%bigquery
CREATE OR REPLACE TABLE
babyweight.babyweight_data_train AS
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks
FROM
babyweight.babyweight_augmented_data
WHERE
# TODO: Modulo hashmonth to be approximately 75% of the data
%%bigquery
CREATE OR REPLACE TABLE
babyweight.babyweight_data_eval AS
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks
FROM
babyweight.babyweight_augmented_data
WHERE
# TODO: Modulo hashmonth to be approximately 25% of the data
%%bigquery
-- LIMIT 0 is a free query; this allows us to check that the table exists.
SELECT * FROM babyweight.babyweight_data_train
LIMIT 0
%%bigquery
-- LIMIT 0 is a free query; this allows us to check that the table exists.
SELECT * FROM babyweight.babyweight_data_eval
LIMIT 0
# Construct a BigQuery client object.
client = bigquery.Client()
dataset_name = # TODO: Add dataset name
# Create dataset reference object
dataset_ref = client.dataset(
dataset_id=dataset_name, project=client.project)
# Export both train and eval tables
for step in [# TODO: Loop over train and eval]:
destination_uri = os.path.join(
"gs://", BUCKET, dataset_name, "data", "{}*.csv".format(step))
table_name = "babyweight_data_{}".format(step)
table_ref = dataset_ref.table(table_name)
extract_job = client.extract_table(
table_ref,
destination_uri,
# Location must match that of the source table.
location="US",
) # API request
extract_job.result() # Waits for job to complete.
print("Exported {}:{}.{} to {}".format(
client.project, dataset_name, table_name, destination_uri))
%%bash
gsutil ls gs://${BUCKET}/babyweight/data/*.csv
%%bash
gsutil cat gs://${BUCKET}/babyweight/data/train000000000000.csv | head -5
%%bash
gsutil cat gs://${BUCKET}/babyweight/data/eval000000000000.csv | head -5
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Enoncé 1
Step2: Comme il n'y a pas d'instruction return, la fonction retourne toujours None, quel que soit le résultat de ce qu'elle calcule.
Step3: Q2
Step4: Si on affiche les résultats intermédiaires
Step5: Q3
Step6: Il suffit de remplacer i par 4. x vaut chr(97+4) et on se déplace de 4 lettres dans l'alphabet, soit e.
Step7: Le programme commence par ajouter la clé Claire au dictionnaire. La variable a mémorise la valeur numérique la plus grande. En l'état, le résultat programme est assez imprévisible puisqu'il dépend de l'ordre dans lequel on parcourt les éléments. Je pense que la fonction devrait récupérer dans une liste l'ensemble des prénoms correspondant à cette valeur maximale s'il était écrit comme ceci
Step8: Q5
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
def make_squares(n):
    # builds the list of squares locally but never returns it
    squares = [i**2 for i in range(n)]
def make_squares(n):
    squares = [i**2 for i in range(n)]
# prints None: the function body contains no return statement
print ( make_squares(2) )
s = 1
a = 0
for i in range(4):
a += s
s += 2
a
s = 1
a = 0
for i in range(4):
print(a,s)
a += s
s += 2
a
d = {i:chr(i+97) for i in range(10)}
x = d[4]
x
notes = { "Alice": 17, "Bob": 18, "Jean−Ma": 17 }
notes['Claire'] = 18
def mystere(d):
    """Return (names, best): the running maximum of the values in *d* and
    every key whose value was >= the maximum seen so far during iteration.

    Note: the returned list depends on the dict's iteration order -- the
    point of the exercise.
    """
    best = 0
    winners = []
    for name, grade in d.items():
        if grade < best:
            continue
        best = grade
        winners.append(name)
    return (winners, best)
print(mystere(notes))
notes
notes = { "Alice": 17, "Bob": 18, "Jean−Ma": 17 }
notes['Claire'] = 18
def mystere(d):
    """Return (names, best): the highest value in *d* and every key holding it."""
    best = 0
    holders = []
    for name, grade in d.items():
        if grade > best:
            # New maximum: restart the list of holders.
            best, holders = grade, [name]
        elif grade == best:
            holders.append(name)
    return (holders, best)
print(mystere(notes))
def f(n):
    """Iterate the Collatz map on n until it reaches 1, then return it.

    NOTE(review): true division turns n into a float after the first even
    step, and the loop never terminates for n <= 0 -- presumably intentional,
    as termination is the subject of the exercise.
    """
    while n != 1:
        n = n / 2 if n % 2 == 0 else 3 * n + 1
    return n
f(3)
f(4)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The above code reads in preprocessed email archive data. These mailing lists are from a variety of different sources
Step2: Now we have processed the mailing lists into interaction graphs based on replies. This is what those graphs look like
Step3: Well, that didn't work out so well...
|
<ASSISTANT_TASK:>
Python Code:
from bigbang.archive import Archive
urls = [#"analytics",
"conferences",
"design",
"education",
"gendergap",
"historic",
"hot",
"ietf-privacy",
"ipython-dev",
"ipython-user",
"languages",
"maps-l",
"numpy-discussion",
"playground",
"potlatch-dev",
"python-committers",
"python-dev",
"scipy-dev",
"scipy-user",
"social-media",
"spambayes",
#"wikien-l",
"wikimedia-l"]
archives= [(url,Archive(url,archive_dir="../archives")) for url in urls]
archives = dict(archives)
import bigbang.graph as graph
igs = dict([(k,graph.messages_to_interaction_graph(v.data)) for (k,v) in archives.items()])
igs
import networkx as nx
def draw_interaction_graph(ig):
    """Draw an interaction graph: node size ~ messages sent, edge width ~ replies.

    NOTE(review): nx.graphviz_layout requires pygraphviz and moved to
    nx.nx_agraph.graphviz_layout in newer networkx releases -- confirm version.
    """
    pos = nx.graphviz_layout(ig,prog='neato')
    # scale node area by the 'sent' message count stored on each node
    node_size = [data['sent'] * 4 for name,data in ig.nodes(data=True)]
    nx.draw(ig,
            pos,
            node_size = node_size,
            node_color = 'b',
            alpha = 0.4,
            font_size=18,
            font_weight='bold'
           )
    # edge width is proportional to replies sent
    edgewidth=[d['weight'] for (u,v,d) in ig.edges(data=True)]
    #overlay edges with width based on weight
    nx.draw_networkx_edges(ig,pos,alpha=0.5,width=edgewidth,edge_color='r')
%matplotlib inline
import matplotlib.pyplot as plt
plt.figure(550,figsize=(12.5, 7.5))
for ln,ig in igs.items():
print ln
try:
plt.subplot(550 + i)
#print nx.degree_assortativity_coefficient(ig)
draw_interaction_graph(ig)
except:
print 'plotting failure'
plt.show()
for ln,ig in igs.items():
print ln, len(ig.nodes()), nx.degree_assortativity_coefficient(ig,weight='weight')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Block the output of all cores except for one
Step2: Define an md.export_cfg object
Step3: Asymptotic Displacement Field of Crack from Linear Elasticity
Step4: Configuration
Step5: Create a $[\bar{1}10]\times[111]\times[11\bar{2}]$ cell
Step6: Remove half of the atoms and readjust the position of remaining
Step7: Readjust the postions
Step8: Replicating the unit cell
Step9: Add vacuum
Step10: Get the displacement field for this configuration
Step11: Impose the diplacement field and other boundary conditions
Step12: assign intial velocities
Step13: add hydrogen to the system
Step14: define ensemble
Step15: run gcmc
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import mapp4py
from mapp4py import md
from lib.elasticity import rot, cubic, resize, displace, crack
from mapp4py import mpi
if mpi().rank!=0:
with open(os.devnull, 'w') as f:
sys.stdout = f;
xprt = md.export_cfg("");
_ = np.array([[-1,1,0],[1,1,1],[1,1,-2]], dtype=np.float);
Q = np.linalg.inv(np.sqrt(_ @ _.T)) @ _;
C = rot(cubic(1.3967587463636366,0.787341583191591,0.609615090769241),Q)
B = np.linalg.inv(
np.array([
[C[0, 0, 0, 0], C[0, 0, 1, 1], C[0, 0, 0, 1]],
[C[0, 0, 1, 1], C[1, 1, 1, 1], C[1, 1, 0, 1]],
[C[0, 0, 0, 1], C[1, 1, 0, 1], C[0, 1, 0, 1]]
]
))
_ = np.roots([B[0, 0], -2.0*B[0, 2],2.0*B[0, 1]+B[2, 2], -2.0*B[1, 2], B[1, 1]])
mu = np.array([_[0],0.0]);
if np.absolute(np.conjugate(mu[0]) - _[1]) > 1.0e-12:
mu[1] = _[1];
else:
mu[1] = _[2]
alpha = np.real(mu);
beta = np.imag(mu);
p = B[0,0] * mu**2 - B[0,2] * mu + B[0, 1]
q = B[0,1] * mu - B[0, 2] + B[1, 1]/ mu
K = np.stack([p, q]) * np.array(mu[1], mu[0]) /(mu[1] - mu[0])
K_r = np.real(K)
K_i = np.imag(K)
Tr = np.stack([
np.array(np.array([[1.0, alpha[0]], [0.0, beta[0]]])),
np.array([[1.0, alpha[1]], [0.0, beta[1]]])
], axis=1)
def u_f0(x):
    """First crack-tip auxiliary field: sqrt(r + x0), where r = sqrt(x0^2 + x1^2)."""
    radius = np.sqrt(x[0] * x[0] + x[1] * x[1])
    return np.sqrt(radius + x[0])
def u_f1(x):
    """Second crack-tip auxiliary field: sqrt(r - x0), carrying the sign of x1."""
    radius = np.sqrt(x[0] * x[0] + x[1] * x[1])
    return np.sqrt(radius - x[0]) * np.sign(x[1])
def disp(x):
    """Evaluate the anisotropic crack-tip displacement field at in-plane point(s) x.

    Relies on the module-level matrices Tr, K_r and K_i computed above from
    the complex roots mu of the elasticity characteristic equation.
    """
    transformed = Tr @ x
    return K_r @ u_f0(transformed) + K_i @ u_f1(transformed)
n = 300;
r = 10;
disp_scale = 0.3;
n0 = int(np.round(n/ (1 +np.pi), ))
n1 = n - n0
xs = np.concatenate((
np.stack([np.linspace(0, -r , n0), np.full((n0,), -1.e-8)]),
r * np.stack([np.cos(np.linspace(-np.pi, np.pi , n1)),np.sin(np.linspace(-np.pi, np.pi , n1))]),
np.stack([np.linspace(-r, 0 , n0), np.full((n0,), 1.e-8)]),
), axis =1)
xs_def = xs + disp_scale * disp(xs)
fig, ax = plt.subplots(figsize=(10.5,5), ncols = 2)
ax[0].plot(xs[0], xs[1], "b-", label="non-deformed");
ax[1].plot(xs_def[0], xs_def[1], "r-.", label="deformed");
sim = md.atoms.import_cfg("configs/Fe_300K.cfg");
a = sim.H[0][0]
sim.cell_change([[-1,1,0],[1,1,1],[1,1,-2]])
H = np.array(sim.H);
def _(x):
    # Atom filter passed to sim.do(): drop atoms in the upper half of the
    # cell (return False) and stretch the surviving lower-half atoms by 2x
    # in y; the cell height is re-compressed by -0.5 strain just below.
    if x[1] > 0.5*H[1, 1] - 1.0e-8:
        return False;
    else:
        x[1] *= 2.0;
sim.do(_);
_ = np.full((3,3), 0.0)
_[1, 1] = -0.5
sim.strain(_)
H = np.array(sim.H);
displace(sim,np.array([sim.H[0][0]/6.0, sim.H[1][1]/6.0, sim.H[2][2]/6.0]))
max_natms=100000
H=np.array(sim.H);
n_per_area=sim.natms/(H[0,0] * H[1,1]);
_ =np.sqrt(max_natms/n_per_area);
N0 = np.array([
np.around(_ / sim.H[0][0]),
np.around(_ / sim.H[1][1]),
1], dtype=np.int32)
# make sure in 1 direction it is an even number
if N0[1] % 2 == 1:
N0[1] += 1
sim *= N0;
vaccum = 100.0
H = np.array(sim.H);
H_new = np.array(sim.H);
H_new[0][0] += vaccum
H_new[1][1] += vaccum
resize(sim, H_new, H.sum(axis=0) * 0.5)
_ = np.array([[-1,1,0],[1,1,1],[1,1,-2]], dtype=np.float);
Q = np.linalg.inv(np.sqrt(_ @ _.T)) @ _;
C = rot(cubic(1.3967587463636366,0.787341583191591,0.609615090769241),Q)
disp = crack(C)
fixed_layer_thickness = 20.0
intensity = 0.5
rate = 0.001
H = np.array(sim.H);
ctr = H.sum(axis=0) * 0.5
lim = np.array([H[0, 0], H[1, 1]])
lim -= vaccum;
lim *= 0.5
lim -= fixed_layer_thickness
def _(x, x_d, x_dof):
    # Per-atom callback: apply the scaled crack displacement field, and for
    # atoms outside the interior box (|x_rel| >= lim in x or y) freeze the
    # in-plane degrees of freedom and assign a velocity proportional to the
    # displacement field, implementing a constant loading rate.
    x_rel = x[:2] - ctr[:2]
    u = disp(x_rel)
    x[:2] += intensity * u
    if (np.abs(x_rel) < lim).sum() != 2:
        x_d[:2] = rate * u
        x_dof[0] = False;
        x_dof[1] = False;
sim.do(_)
md.export_cfg("", extra_vecs=["x_dof"] )(sim, "dumps/crack.cfg")
sim.kB = 8.617330350e-5
sim.hP = 4.13566766225 * 0.1 * np.sqrt(1.60217656535/1.66053904020)
sim.create_temp(300.0, 846244)
sim.add_elem('H',1.007940)
# GPa and Kelvin
def mu(p, T):
    """Chemical potential used for the H GCMC reservoir.

    p is in GPa and T in Kelvin (see the comment above); the fitted
    coefficients are kept exactly as in the original expression.
    """
    c0 = -2.37
    c1 = 0.0011237850013293155
    c2 = 0.00004308665175
    c3 = -0.000193889932875
    return c0 + c1 * T + c2 * T * np.log(p) + c3 * T * np.log(T)
muvt = md.muvt(mu(1.0e-3,300.0), 300.0, 0.1, 'H', 73108204);
muvt.nevery = 100;
muvt.nattempts=40000;
muvt.ntally=1000;
muvt.export=md.export_cfg('dumps/dump',10000)
muvt.run(sim,100000);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: So, our linear model seems to be quite a bit better than using just the mean mpg.
Step2: Which $\lambda$ value is best? Careful. What is the best value of $\lambda$ if just comparing error on training data?
Step3: Now we can train our model using several values of $\lambda$ on Xtrain,Train and calculate the model error on Xval,Tval. Then pick best value of $\lambda$ based on error on Xval,Tval. Finally, calculate error of model using best $\lambda$ on Xtest,Ttest as our estimate of error on new data.
Step4: Typical use of these partitions is shown below. It is most handy to just collect all results in a matrix and calculate averages afterwards, rather than accumulating each result and dividing by the number of repetitions at the end.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
np.any?
def makeMPGData(filename='auto-mpg.data'):
    """Load the UCI auto-mpg data set and split it into inputs and target.

    Rows whose horsepower field (column 3) is missing, recorded as '?',
    are converted to NaN and then dropped.

    Parameters
    ----------
    filename : str
        Path to the whitespace-separated auto-mpg data file.

    Returns
    -------
    X : ndarray, shape (n, 7) -- cylinders .. origin columns
    T : ndarray, shape (n, 1) -- mpg target column
    Xnames : list of str column names for X
    Tname : str name of the target column
    """
    def missingIsNan(s):
        # np.loadtxt passes bytes to converters on older numpy and str on
        # numpy >= 1.23; treat '?' in either form as a missing value.
        return np.nan if s in (b'?', '?') else float(s)
    data = np.loadtxt(filename, usecols=range(8), converters={3: missingIsNan})
    print("Read",data.shape[0],"rows and",data.shape[1],"columns from",filename)
    # Keep only rows with no NaN in any of the 8 used columns.
    goodRowsMask = np.isnan(data).sum(axis=1) == 0
    data = data[goodRowsMask,:]
    print("After removing rows containing question marks, data has",data.shape[0],"rows and",data.shape[1],"columns.")
    X = data[:,1:]
    T = data[:,0:1]
    Xnames = ['cylinders','displacement','horsepower','weight','acceleration','year','origin']
    Tname = 'mpg'
    return X,T,Xnames,Tname
X,T,Xnames,Tname = makeMPGData()
means = X.mean(0)
stds = X.std(0)
nRows = X.shape[0]
Xs1 = np.insert((X-means)/stds, 0, 1, axis=1) # insert column of ones in new 0th column
# Xs1 = np.hstack(( np.ones((nRows,1)), (X-means)/stds ))
w = np.linalg.lstsq(Xs1, T)[0]
w
# predict = Xs1.dot(w)
predict = Xs1 @ w
RSE = np.sum((T-predict)**2) / np.sum((T - T.mean(0))**2)
RSE
lamb = 0.1
D = Xs1.shape[1]
lambdaDiag = np.eye(D) * lamb
lambdaDiag[0,0] = 0
lambdaDiag
def makeLambda(D,lamb=0):
lambdaDiag = np.eye(D) * lamb
lambdaDiag[0,0] = 0
return lambdaDiag
makeLambda(3,0.2)
w = np.linalg.lstsq(Xs1.T @ Xs1 + lambdaDiag, Xs1.T @ T)[0]
w
D = Xs1.shape[1]
w1 = np.linalg.lstsq(Xs1.T @ Xs1 + makeLambda(D,0.1), Xs1.T @ T)[0]
w2 = np.linalg.lstsq(Xs1.T @ Xs1 + makeLambda(D,0), Xs1.T @ T)[0]
np.hstack((w1,w2))
for lamb in [0,0.1,1,10,100]:
w = np.linalg.lstsq(Xs1.T @ Xs1 + makeLambda(D,lamb), Xs1.T @ T)[0]
plt.plot(w)
lambdas = [0,0.1,1,10,100,1000]
for lamb in lambdas:
w = np.linalg.lstsq(Xs1.T @ Xs1 + makeLambda(D,lamb), Xs1.T @ T)[0]
plt.plot(Xs1[:30] @ w)
plt.plot(T[:30],'ro',lw=5,alpha=0.8)
plt.legend(lambdas,loc='best')
def partition(X,T,trainFraction=0.8, validateFraction=0.1, testFraction=0.1):
    '''Randomly partition the rows of (X, T) into train/validate/test sets.

    Usage: Xtrain,Ttrain,Xval,Tval,Xtest,Ttest = partition(X,T,0.8,0.1,0.1)

    Raises ValueError if the three fractions do not sum to 1.
    '''
    total = trainFraction + validateFraction + testFraction
    # Compare with a tolerance rather than exact equality: 0.8 + 0.1 + 0.1
    # is not exactly 1.0 in binary floating point, so the previous
    # "total != 1" test rejected even the default arguments.
    if abs(total - 1.0) > 1e-9:
        raise ValueError("Train, validate and test fractions must sum to 1. Given values sum to " + str(total))
    n = X.shape[0]
    nTrain = round(trainFraction * n)
    nValidate = round(validateFraction * n)
    nTest = round(testFraction * n)
    # Rounding can leave a row unassigned; give any remainder to the test set.
    if nTrain + nValidate + nTest != n:
        nTest = n - nTrain - nValidate
    # Random order of data matrix row indices
    rowIndices = np.arange(X.shape[0])
    np.random.shuffle(rowIndices)
    # Build X and T matrices by selecting corresponding rows for each partition
    Xtrain = X[rowIndices[:nTrain],:]
    Ttrain = T[rowIndices[:nTrain],:]
    Xvalidate = X[rowIndices[nTrain:nTrain+nValidate],:]
    Tvalidate = T[rowIndices[nTrain:nTrain+nValidate],:]
    Xtest = X[rowIndices[nTrain+nValidate:nTrain+nValidate+nTest],:]
    Ttest = T[rowIndices[nTrain+nValidate:nTrain+nValidate+nTest],:]
    return Xtrain,Ttrain,Xvalidate,Tvalidate,Xtest,Ttest
X = np.arange(20).reshape((10,2))
X
T = np.arange(10).reshape((-1,1))
T
X = np.arange(20).reshape((10,2))
T = np.arange(10).reshape((-1,1))
Xtrain,Ttrain,Xval,Tval,Xtest,Ttest = partition(X,T,0.6,0.2,0.2)
print("Xtrain:")
print(Xtrain)
print(" Ttrain:")
print(Ttrain)
print("\n Xval:")
print(Xval)
print(" Tval:")
print(Tval)
print("\n Xtest:")
print(Xtest)
print(" Ttest:")
print(Ttest)
def count(n):
    """Generator demo: yield the integers 0 .. n-1, one at a time."""
    yield from range(n)
count(4)
list(count(4))
for i in count(5):
print(i)
zip?
def partitionKFolds(X,T,nFolds,shuffle=False,nPartitions=3):
    '''Generate every train/validate/test partition over nFolds folds.

    With nPartitions == 3 (default), for each choice of test fold and each
    choice of a distinct validate fold, yields
        Xtrain,Ttrain,Xvalidate,Tvalidate,Xtest,Ttest,testFold
    (nFolds * (nFolds - 1) partitions in total).  Otherwise each fold is
    used once as the test set and the rest as training, yielding
        Xtrain,Ttrain,Xtest,Ttest,testFold
    (nFolds partitions).

    Usage: for Xtrain,Ttrain,Xval,Tval,Xtest,Ttest,fold in partitionKFolds(X,T,5):
    '''
    # Randomly arrange row indices
    rowIndices = np.arange(X.shape[0])
    if shuffle:
        np.random.shuffle(rowIndices)
    # Calculate number of samples in each of the nFolds folds
    nSamples = X.shape[0]
    nEach = int(nSamples / nFolds)
    if nEach == 0:
        raise ValueError("partitionKFolds: Number of samples in each fold is 0.")
    # Calculate the starting and stopping row index for each fold.
    # Store in startsStops as list of (start,stop) pairs.
    # The last fold absorbs any remainder rows (stops[-1] = nSamples).
    starts = np.arange(0,nEach*nFolds,nEach)
    stops = starts + nEach
    stops[-1] = nSamples
    startsStops = list(zip(starts,stops))
    # Repeat with testFold taking each single fold, one at a time
    for testFold in range(nFolds):
        if nPartitions == 3:
            # Repeat with validateFold taking each single fold, except for the testFold
            for validateFold in range(nFolds):
                if testFold == validateFold:
                    continue
                # trainFolds are all remaining folds, after selecting test and validate folds
                trainFolds = np.setdiff1d(range(nFolds), [testFold,validateFold])
                # Construct Xtrain and Ttrain by collecting rows for all trainFolds
                rows = []
                for tf in trainFolds:
                    a,b = startsStops[tf]
                    rows += rowIndices[a:b].tolist()
                Xtrain = X[rows,:]
                Ttrain = T[rows,:]
                # Construct Xvalidate and Tvalidate
                a,b = startsStops[validateFold]
                rows = rowIndices[a:b]
                Xvalidate = X[rows,:]
                Tvalidate = T[rows,:]
                # Construct Xtest and Ttest
                a,b = startsStops[testFold]
                rows = rowIndices[a:b]
                Xtest = X[rows,:]
                Ttest = T[rows,:]
                # Return partition matrices, then suspend until called again.
                yield Xtrain,Ttrain,Xvalidate,Tvalidate,Xtest,Ttest,testFold
        else:
            # trainFolds are all remaining folds, after selecting test and validate folds
            trainFolds = np.setdiff1d(range(nFolds), [testFold])
            # Construct Xtrain and Ttrain by collecting rows for all trainFolds
            rows = []
            for tf in trainFolds:
                a,b = startsStops[tf]
                rows += rowIndices[a:b].tolist()
            Xtrain = X[rows,:]
            Ttrain = T[rows,:]
            # Construct Xtest and Ttest
            a,b = startsStops[testFold]
            rows = rowIndices[a:b]
            Xtest = X[rows,:]
            Ttest = T[rows,:]
            # Return partition matrices, then suspend until called again.
            yield Xtrain,Ttrain,Xtest,Ttest,testFold
X = np.arange(20).reshape((10,2))
T = np.arange(10).reshape((-1,1))
k = 0
for Xtrain,Ttrain,Xval,Tval,Xtest,Ttest,testFold in partitionKFolds(X,T,5):
k += 1
print("Fold",k)
print(" Xtrain:")
print(Xtrain)
print(" Ttrain:")
print(Ttrain)
print("\n Xval:")
print(Xval)
print(" Tval:")
print(Tval)
print("\n Xtest:")
print(Xtest)
print(" Ttest:")
print(Ttest)
def train(X,T,lamb):
    """Fit ridge (L2-regularized) linear regression on standardized inputs.

    Parameters
    ----------
    X : ndarray, shape (n, d) input samples
    T : ndarray, shape (n, 1) targets
    lamb : float, ridge penalty; the bias weight is not penalized

    Returns
    -------
    dict with keys 'w' (shape (d+1, 1), bias first), 'means', 'stds' --
    the statistics needed by use() to standardize new data identically.
    """
    means = X.mean(0)
    stds = X.std(0)
    n,d = X.shape
    Xs1 = np.insert( (X - means)/stds, 0, 1, axis=1)
    lambDiag = np.eye(d+1) * lamb
    lambDiag[0,0] = 0  # do not penalize the bias term
    # rcond=None selects the modern default cutoff and avoids the
    # FutureWarning numpy emits for lstsq calls without rcond.
    w = np.linalg.lstsq( Xs1.T @ Xs1 + lambDiag, Xs1.T @ T, rcond=None)[0]
    return {'w': w, 'means':means, 'stds':stds}
def use(X, model):
    """Predict targets for X with a model dict produced by train()."""
    standardized = (X - model['means']) / model['stds']
    withBias = np.insert(standardized, 0, 1, axis=1)
    return withBias @ model['w']
def rmse(A, B):
    """Root-mean-square error between A and B."""
    meanSquaredError = np.mean((A - B) ** 2)
    return np.sqrt(meanSquaredError)
lambdas = [0,1,5,10,20]
results = []
for Xtrain,Ttrain,Xval,Tval,Xtest,Ttest,_ in partitionKFolds(X,T,5):
for lamb in lambdas:
model = train(Xtrain,Ttrain,lamb)
predict = use(Xval,model)
results.append([lamb,
rmse(use(Xtrain,model),Ttrain),
rmse(use(Xval,model),Tval),
rmse(use(Xtest,model),Ttest)])
results = np.array(results)
print(results)
print(results.shape)
# print(results)
avgresults = []
for lam in lambdas:
print(lam)
print(results[results[:,0]==lam,1:])
avgresults.append( [lam] + np.mean(results[results[:,0]==lam,1:],axis=0).tolist())
avgresults = np.array(avgresults)
print(avgresults)
plt.plot(avgresults[:,0],avgresults[:,1:],'o-')
plt.xlabel('$\lambda$')
plt.ylabel('RMSE')
plt.legend(('Train','Validate','Test'),loc='best');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Modell-Architektur
Step2: Sigmoid
Step3: Relu
Step4: The classic VGG16 Architecture
Step5: VGG starts with a number of convolutional blocks for feature extraction and ends with a fully connected classifier
Step6:
Step7:
Step8: What does the CNN "see"?
Step9: Visualizing feature channels using Quiver
Step10: Modern Alternative
|
<ASSISTANT_TASK:>
Python Code:
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
%pylab inline
import matplotlib.pylab as plt
import numpy as np
from distutils.version import StrictVersion
import sklearn
print(sklearn.__version__)
assert StrictVersion(sklearn.__version__ ) >= StrictVersion('0.18.1')
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
assert StrictVersion(tf.__version__) >= StrictVersion('1.1.0')
import keras
print(keras.__version__)
assert StrictVersion(keras.__version__) >= StrictVersion('2.0.0')
def centerAxis(uses_negative=False):
    """Move the current matplotlib axes' spines so the y-axis crosses at x=0.

    If uses_negative is True, the x-axis is also centered (for plots whose
    y range includes negative values).  Operates on plt.gca(), so call it
    just before plotting.
    """
    # http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
    ax = plt.gca()
    ax.spines['left'].set_position('center')
    if uses_negative:
        ax.spines['bottom'].set_position('center')
    # Hide the right/top spines and keep ticks on the visible axes only.
    ax.spines['right'].set_color('none')
    ax.spines['top'].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
def np_sigmoid(X):
    """Element-wise logistic sigmoid: 1 / (1 + exp(-X))."""
    return 1 / (1 + np.exp(-X))
x = np.arange(-10,10,0.01)
y = np_sigmoid(x)
centerAxis()
plt.plot(x,y,lw=3)
def np_relu(x):
    """Element-wise rectified linear unit: max(0, x)."""
    return np.maximum(x, 0)
x = np.arange(-10, 10, 0.01)
y = np_relu(x)
centerAxis()
plt.plot(x,y,lw=3)
def predict(model, img_path):
    """Classify one image file with a Keras ImageNet model and print top-3 labels.

    Relies on the module-level `image`, `preprocess_input` and
    `decode_predictions` imports.
    NOTE(review): the later `from keras.applications.resnet50 import
    preprocess_input, decode_predictions` rebinds these names, so after that
    cell this function preprocesses with ResNet50's pipeline even when
    called with the VGG16 model -- confirm this is intended.
    """
    # Resize to the 224x224 input expected by VGG16/ResNet50.
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    # Add the batch dimension: (224, 224, 3) -> (1, 224, 224, 3).
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    # decode the results into a list of tuples (class, description, probability)
    # (one such list for each sample in the batch)
    print('Predicted:', decode_predictions(preds, top=3)[0])
from keras import applications
# applications.VGG16?
vgg16_model = applications.VGG16(weights='imagenet')
vgg16_model.summary()
!curl -O https://upload.wikimedia.org/wikipedia/commons/thumb/d/de/Beagle_Upsy.jpg/440px-Beagle_Upsy.jpg
predict(model = vgg16_model, img_path = '440px-Beagle_Upsy.jpg')
!curl -O https://djcordhose.github.io/ai/img/cat-bonkers.png
predict(model = vgg16_model, img_path = 'cat-bonkers.png')
!curl -O https://djcordhose.github.io/ai/img/squirrels/original/Michigan-MSU-raschka.jpg
!curl -O https://djcordhose.github.io/ai/img/squirrels/original/Black_New_York_stuy_town_squirrel_amanda_ernlund.jpeg
!curl -O https://djcordhose.github.io/ai/img/squirrels/original/london.jpg
predict(model = vgg16_model, img_path = 'Michigan-MSU-raschka.jpg')
predict(model = vgg16_model, img_path = 'Black_New_York_stuy_town_squirrel_amanda_ernlund.jpeg')
predict(model = vgg16_model, img_path = 'london.jpg')
# create a tmp dir in the local directory this notebook runs in, otherwise quiver will fail (and won't tell you why)
!rm -rf tmp
!mkdir tmp
# https://github.com/keplr-io/quiver
# Alternative with more styles of visualization: https://github.com/raghakot/keras-vis
# https://github.com/keplr-io/quiver
from quiver_engine import server
server.launch(vgg16_model, input_folder='.', port=7000)
# open at http://localhost:7000/
# interrupt kernel to return control to notebook
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
resnet_model = ResNet50(weights='imagenet')
resnet_model.summary()
predict(model = resnet_model, img_path = 'cat-bonkers.png')
predict(model = resnet_model, img_path = 'Michigan-MSU-raschka.jpg')
predict(model = resnet_model, img_path = 'Black_New_York_stuy_town_squirrel_amanda_ernlund.jpeg')
predict(model = resnet_model, img_path = 'london.jpg')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: The source dataset
Step3: Create the training data table
Step4: Verify table creation
Step5: Baseline Model
Step6: REMINDER
Step7: NOTE
Step8: Model 1
Step9: Once the training is done, visit the BigQuery Cloud Console and look at the model that has been trained. Then, come back to this notebook.
Step10: Here we run a SQL query to take the SQRT() of the mean squared error as your loss metric for evaluation for the benchmark_model.
Step11: Model 2
Step12: Evaluate the model.
Step13: Model 3
Step14: Next we evaluate the model.
|
<ASSISTANT_TASK:>
Python Code:
!pip install --user google-cloud-bigquery==1.25.0
# Installing the latest version of the package
import tensorflow as tf
print("TensorFlow version: ",tf.version.VERSION)
%%bash
export PROJECT=$(gcloud config list project --format "value(core.project)")
echo "Your current GCP Project Name is: "$PROJECT
%%bash
# Create a BigQuery dataset for feat_eng if it doesn't exist
datasetexists=$(bq ls -d | grep -w feat_eng)
if [ -n "$datasetexists" ]; then
echo -e "BigQuery dataset already exists, let's not recreate it."
else
echo "Creating BigQuery dataset titled: feat_eng"
bq --location=US mk --dataset \
--description 'Taxi Fare' \
$PROJECT:feat_eng
echo "\nHere are your current datasets:"
bq ls
fi
%%bigquery
CREATE OR REPLACE TABLE
feat_eng.feateng_training_data AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
passenger_count*1.0 AS passengers,
pickup_datetime,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat
FROM
`nyc-tlc.yellow.trips`
WHERE
MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 10000) = 1
AND fare_amount >= 2.5
AND passenger_count > 0
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
%%bigquery
# LIMIT 0 is a free query; this allows us to check that the table exists.
SELECT
*
FROM
feat_eng.feateng_training_data
LIMIT
0
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.baseline_model OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
pickup_datetime,
pickuplon,
pickuplat,
dropofflon,
dropofflat
FROM
feat_eng.feateng_training_data
%%bigquery
# Eval statistics on the held out data.
SELECT
*,
SQRT(loss) AS rmse
FROM
ML.TRAINING_INFO(MODEL feat_eng.baseline_model)
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.baseline_model)
#TODO 1 - your code here
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_1 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
pickup_datetime,
#TODO 2 - Your code here
pickuplon,
pickuplat,
dropofflon,
dropofflat
FROM
feat_eng.feateng_training_data
%%bigquery
SELECT
*,
SQRT(loss) AS rmse
FROM
ML.TRAINING_INFO(MODEL feat_eng.model_1)
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_1)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_1)
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_2 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
#TODO 3a -- Your code here
pickuplon,
pickuplat,
dropofflon,
dropofflat
FROM
`feat_eng.feateng_training_data`
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_2)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_2)
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_3 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
#TODO 3b -- Your code here
pickuplon,
pickuplat,
dropofflon,
dropofflat
FROM
`feat_eng.feateng_training_data`
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_3)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_3)
<END_TASK>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.