| text_prompt (string, lengths 168 – 30.3k) | code_prompt (string, lengths 67 – 124k) |
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step 1
Step1: Pagecounts
Step2: Get pagecounts of en.wikipedia.org through mobile.
Step3: Pageview
Step4: Get page viewcounts of en.wikipedia.org through mobile-web and mobile-app.
Step5: Step 2
Step6: Take a look at the dataframes.
Step7: Combine mobile app and mobile web views and create a new dataframe df_pageviews_mobile. Since both dataframes have the same dimensions and are sorted on timestamp, we can simply add the views values to get the total mobile views.
Step8: Prepare the four dataframes for merging on timestamp.
Step9: Save the df_total dataframe to csv file.
Step10: Step 3
Step11: Create a column 'date' that combines year, month, and day. Use it as the x axis later when plotting
Step12: Use Plotly to create an interactive visualization to present the page view information on English Wikipedia.
|
<ASSISTANT_TASK:>
Python Code:
import requests
import json
import pandas as pd
from pandas.io.json import json_normalize
import copy
from datetime import datetime
import plotly
import plotly.graph_objs as go
from plotly import tools
plotly.__version__
plotly.offline.init_notebook_mode(connected=True)
headers={'User-Agent' : 'https://github.com/mbzhuang', 'From' : 'mbzhuang@uw.edu'}
endpoint = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{access}/{granularity}/{start}/{end}'
params = {'project' : 'en.wikipedia.org',
'access' : 'desktop-site',
'agent' : 'user',
'granularity' : 'monthly',
'start' : '2008010100',
'end' : '2016080100'}
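# endpoint.format(**params) fills the {placeholders}; the extra 'agent' key is simply
# ignored because the legacy pagecounts endpoint has no {agent} field. The resulting URL is:
# https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/en.wikipedia.org/desktop-site/monthly/2008010100/2016080100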
api_call = requests.get(endpoint.format(**params))
response = api_call.json()
with open('pagecounts_desktop-site_200801-201607.json', 'w') as outfile:
json.dump(response, outfile)
params = {'project' : 'en.wikipedia.org',
'access' : 'mobile-site',
'agent' : 'user',
'granularity' : 'monthly',
'start' : '2008010100',
'end' : '2016080100'}
api_call = requests.get(endpoint.format(**params))
response = api_call.json()
with open('pagecounts_mobile-site_200801-201607.json', 'w') as outfile:
json.dump(response, outfile)
endpoint = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}'
params = {'project' : 'en.wikipedia.org',
'access' : 'desktop',
'agent' : 'user',
'granularity' : 'monthly',
'start' : '2015070100',
'end' : '2017100100'}
api_call = requests.get(endpoint.format(**params))
response = api_call.json()
with open('pageviews_desktop-site_201507-201709.json', 'w') as outfile:
json.dump(response, outfile)
params = {'project' : 'en.wikipedia.org',
'access' : 'mobile-web',
'agent' : 'user',
'granularity' : 'monthly',
'start' : '2015070100',
'end' : '2017100100'}
api_call = requests.get(endpoint.format(**params))
response = api_call.json()
with open('pageviews_mobile-web_201507-201709.json', 'w') as outfile:
json.dump(response, outfile)
params = {'project' : 'en.wikipedia.org',
'access' : 'mobile-app',
'agent' : 'user',
'granularity' : 'monthly',
'start' : '2015070100',
'end' : '2017100100'}
api_call = requests.get(endpoint.format(**params))
response = api_call.json()
with open('pageviews_mobile-app_201507-201709.json', 'w') as outfile:
json.dump(response, outfile)
def json_to_DataFrame(json_file):
with open(json_file, 'r') as infile:
json_content = json.load(infile)
DataFrame = json_normalize(json_content['items'])
return DataFrame
df_pagecounts_desktop = json_to_DataFrame("pagecounts_desktop-site_200801-201607.json")
df_pagecounts_mobile = json_to_DataFrame("pagecounts_mobile-site_200801-201607.json")
df_pageviews_desktop = json_to_DataFrame("pageviews_desktop-site_201507-201709.json")
df_pageviews_mobile_web = json_to_DataFrame("pageviews_mobile-web_201507-201709.json")
df_pageviews_mobile_app = json_to_DataFrame("pageviews_mobile-app_201507-201709.json")
df_pagecounts_desktop.head()
df_pagecounts_mobile.head()
df_pageviews_desktop.head()
df_pageviews_mobile_web.head()
df_pageviews_mobile_app.head()
df_pageviews_mobile = copy.copy(df_pageviews_mobile_web)
df_pageviews_mobile.views = df_pageviews_mobile.views + df_pageviews_mobile_app.views
df_pageviews_mobile.head()
# Merge mobile and desktop of pagecounts dataframes.
df_pagecounts = pd.merge(df_pagecounts_desktop, df_pagecounts_mobile, on='timestamp', how='left')
# Merge mobile and desktop of pageviews dataframes
df_pageviews = pd.merge(df_pageviews_desktop, df_pageviews_mobile, on='timestamp')
# Subset the merged dataframes and rename column names
df_pagecounts = df_pagecounts[['timestamp', 'count_x', 'count_y']]
df_pagecounts.columns = ['timestamp', 'pagecount_desktop_views', 'pagecount_mobile_views']
df_pageviews = df_pageviews[['timestamp', 'views_x', 'views_y']]
df_pageviews.columns = ['timestamp', 'pageview_desktop_views', 'pageview_mobile_views']
# Finally, merge pagecount and pageview dataframes
df_total = pd.merge(df_pagecounts, df_pageviews, on='timestamp', how='left')
# Fill the missing value with 0
df_total = df_total.fillna(value = 0)
# Create two new columns, 'pagecount_all_views' and 'pageview_all_views', that are the combination of
# mobile and desktop counts, respectively.
df_total['pagecount_all_views'] = df_total.pagecount_desktop_views + df_total.pagecount_mobile_views
df_total['pageview_all_views'] = df_total.pageview_desktop_views + df_total.pageview_mobile_views
# Create two new columns, 'year' and 'month', that are both based on the column 'timestamp'
df_total.timestamp = pd.to_datetime(df_total.timestamp, format='%Y%m%d%H')
df_total['year'] = df_total['timestamp'].dt.year
df_total['month'] = df_total['timestamp'].dt.month
# Do subsetting to get the final dataframe
df_total = df_total[['year', 'month',
'pagecount_all_views', 'pagecount_desktop_views', 'pagecount_mobile_views',
'pageview_all_views', 'pageview_desktop_views', 'pageview_mobile_views']]
df_total.head()
df_total.to_csv("en-wikipedia_traffic_200801-201709.csv", sep='\t', index=False)
df = pd.read_csv("en-wikipedia_traffic_200801-201709.csv", sep='\t')
df['day'] = '01'
df['date']= df.apply(lambda x:datetime.strptime("{0} {1} {2}".format(x['year'],x['month'], x['day']), "%Y %m %d"),axis=1)
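# Note: a vectorised alternative would be pd.to_datetime(df[['year', 'month', 'day']]),
# assuming a pandas version that supports assembling datetimes from those columns; the
# apply/strptime approach above is the original implementation.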
trace1 = go.Scatter(
x = df[df.pagecount_all_views != 0].date,
y = df[df.pagecount_all_views != 0].pagecount_all_views,
name='Total(Pagecount definition)',
line = dict(color = 'black', dash = 'dash')
)
trace2 = go.Scatter(
x = df[df.pagecount_desktop_views != 0].date,
y = df[df.pagecount_desktop_views != 0].pagecount_desktop_views,
name='Main site(Pagecount definition)',
line = dict(color = 'green', dash = 'dash')
)
trace3 = go.Scatter(
x = df[df.pagecount_mobile_views != 0].date,
y = df[df.pagecount_mobile_views != 0].pagecount_mobile_views,
name='Mobile(Pagecount definition)',
line = dict(color = 'blue', dash = 'dash')
)
trace4 = go.Scatter(
x = df[df.pageview_all_views != 0].date,
y = df[df.pageview_all_views != 0].pageview_all_views,
name='Total(Pageview definition)',
line = dict(color = 'black')
)
trace5 = go.Scatter(
x = df[df.pageview_desktop_views != 0].date,
y = df[df.pageview_desktop_views != 0].pageview_desktop_views,
name='Main site(Pageview definition)',
line = dict(color = 'green')
)
trace6 = go.Scatter(
x = df[df.pageview_mobile_views != 0].date,
y = df[df.pageview_mobile_views != 0].pageview_mobile_views,
name='Mobile(Pageview definition)',
line = dict(color = 'blue')
)
data = [trace1, trace2, trace3, trace4, trace5, trace6]
layout = dict(title = 'Page Views on English Wikipedia',
height= 600, width = 1000,
xaxis = dict(title = ''),
yaxis = dict(title = 'Page views', range=[0, 12000000000]),
legend=dict(orientation="h")
)
fig = dict(data=data, layout=layout)
plotly.offline.iplot(fig, filename='PageViewsonEnglishWikipedia')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading it in and taking a look.
Step2: Time is in four-hour intervals (generally). The time listed is when a particular time interval ends. Entries and Exits are cumulative counts. C/A (Control Area) and Unit are station identifiers. SCP is a turnstile identifier. Turnstiles are the most granular level of collection. After a little research, a teammate of mine discovered that a unit can have up to four turnstiles, so we'll be able to uniquely identify a count by a Station-Unit-SCP tuple. Control Area will not be useful for us. Neither will Linename, Division, or DESC, so I will drop these.
Step3: Now we want to transform the cumulative counts for entries and exits into raw number of entries and exits. We will do so on the turnstile level. First things first let's make the Date and time columns into one column with a Datetime object in it. And just quickly before I do that, I will remove all the header rows of the data.
Step4: Now let's investigate the length of these time intervals. Since the data that I grabbed is from many different weeks, and the time intervals between different weeks may not be meaningful because I might not have grabbed the data from the next week, I will start by inserting a week column that will be the number of weeks since 08/08/2015. Then I will group by this variable in addition to Unit and SCP and do a diff operation on the datetime column.
Step5: The first value is 'NaT' which I guess stands for Not a Time. This is good. Now let's take a look at the values.
Step6: So, we can see that the data are messy. This is good information, but it actually should not matter for the question at hand. If we assume that the irregularities in the time intervals do not occur more or less often on any day of the week, we can just ignore this irregularity. To me this seems like a safe assumption so I will proceed under it. Now let's do a diff operation on the Cumulative entries and exits to see what the data look like.
Step7: Let's check out the smallest values of both ent_diffs and ex_diffs.
Step8: There are negative diff values for both Entries and Exits. This makes no sense. Since we can't explain these values, I say we drop them from the set so they don't affect the analysis. Again, this assumes that these negative values are not meaningful and, further, that any meaning they have does not differ between days of the week. Personally, I think this is reasonable. Short of calling an MTA official and asking about the negative values, there's not much else I can do.
Step9: Now let's look at the other end of the spectrum to make sure the largest values are reasonable.
Step10: Those values are definitely not reasonable. Let's just make a quick assumption that no more than 14,400 people can exit or enter per turnstile in a given interval (that's one person per second for the four hour intervals).
Step11: Now we are getting closer and closer to what we'll consider to be clean data. As a next step let's say the measure of overall traffic is entries plus exits. Also let's add a day of the week column, and drop all the columns we won't need from the point forward.
Step12: Now we are only concerned with the morning commute time. Remember the time listed is for when the interval ends. Now I will select only the observations that correspond to the morning commute window. At the beginning I said I was interested in the time around 8
Step13: Now we are ready to drop all but week, day_of_week, and tot_traffic. Then we can group by week and day_of_week and sum tot_traffic so that we just have 175 = 25 * 7 rows, one for each observed day.
Step14: I kept the weekends (day_of_week = 5 or 6) in up to this point just to see how the traffic values would compare to weekdays. Now we can see the weekend values are consistently about half to a third of the weekday values. Now I'll drop the weekend values so we can just focus on the task at hand.
Step15: It'd be nice to make each week a row and each day of the week a column with tot_traffic in each cell.
Step16: Now we can run a hypothesis test of whether the mean of Mondays is different from the mean of Tuesdays, the mean of Wednesdays, and so on. First, let's visualize the data a little.
Step17: The means are the dots and the medians are the lines. The Friday values do seem to be smaller.
Step18: The means do seem to be different, but how likely is it that this difference is due to random chance?
Step19: I've set the whiskers of the boxplot to enclose 99% of the means. As you can see, the mean of the Fridays seems to fall just barely inside this threshold. This indicates it's very unlikely that the mean observed in the Friday group could occur in a distribution of means of random samples drawn from the whole population of weekdays. Not impossible, but unlikely. Exactly how likely, you may ask?
Step20: Now, let's do this one other way. Instead of the pool-then-sample method, I'll use resampling. Since I am investigating the difference between each day of the week's ridership and the average weekday, I'll use this strategy
|
<ASSISTANT_TASK:>
Python Code:
weeks = ['150815','150822','150829','150919','150926',
'151003','151024','151121','151212','151219',
'160130','160206','160227','160305','160312',
'160326','160409','160416','160430','160507',
'160514','160521','160611','160618','160625']
urls = ['http://web.mta.info/developers/data/nyct/turnstile/turnstile_%s.txt' % week
for week in weeks]
!touch data.txt
for url in urls:
!curl -s {url} >> data.txt
import pandas as pd
data = pd.read_csv('data.txt',low_memory=False)
data.columns = [col.strip() for col in data.columns]
print data.shape
print data.columns.tolist()
print data.head(3)
data = data.drop(['C/A','LINENAME','DIVISION','DESC'],axis=1)
data.columns.tolist()
data = data.drop(data[data.DATE=='DATE'].index,axis=0)
from datetime import datetime, date, timedelta, time
data.insert(3,'datetime',[datetime.strptime(d+t,'%m/%d/%Y%X')
for d,t in zip(data.DATE,data.TIME)])
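# The format string '%m/%d/%Y%X' parses the concatenated date and time strings
# (e.g. '08/08/2015' + '00:00:00'); %X is the locale's time representation (HH:MM:SS here).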
data = data.drop(['DATE','TIME'],axis=1)
data.head(3)
d_start = datetime.combine(date(2015,8,8), datetime.min.time())
sec_in_week = timedelta(weeks=1).total_seconds()
data.insert(4,'week',[int((dt.to_pydatetime() - d_start).total_seconds()/sec_in_week)
for dt in data.datetime])
print len(pd.unique(data.week))
print data.week.value_counts().head()
data.insert(4,'dt_diffs',data.groupby(['UNIT','SCP','week'])['datetime'].transform(pd.Series.diff))
print data.dt_diffs.head()
print data.dt_diffs.value_counts().head(3)
print (data.dt_diffs.value_counts()/len(data)).head(3)
data['ENTRIES'] = pd.to_numeric(data['ENTRIES'])
data.insert(len(data.columns),'ent_diffs',
data.groupby(['UNIT','SCP','week'])['ENTRIES'].transform(pd.Series.diff))
data['EXITS'] = pd.to_numeric(data['EXITS'])
data.insert(len(data.columns),'ex_diffs',
data.groupby(['UNIT','SCP','week'])['EXITS'].transform(pd.Series.diff))
data.head(3)
print data.ent_diffs.sort_values().tolist()[0:3]
print data.ex_diffs.sort_values().tolist()[0:3]
before = len(data)
data = data.drop(data[(data.ent_diffs<0) | (data.ex_diffs<0)].index, axis=0)
after = len(data)
shrinkage = float(before-after)/before * 100
print 'Data have been shrunk by %g %%' % shrinkage
print data.ent_diffs.sort_values(ascending=False).tolist()[0:3]
print data.ex_diffs.sort_values(ascending=False).tolist()[0:3]
before = len(data)
data = data.drop(data[(data.ent_diffs>14400) | (data.ex_diffs>14400)].index, axis=0)
after = len(data)
shrinkage = float(before-after)/before * 100
print 'Data have been shrunk by %g %%' % shrinkage
data['tot_traffic'] = data.ent_diffs + data.ex_diffs
data.insert(0,'day_of_week',[dt.weekday() for dt in data.datetime])
data = data[['datetime','week','day_of_week','tot_traffic']]
data.head() ## 5 is for Saturday
t_beg, t_end = time(9,0,0), time(12,0,0)
data.insert(1,'time',[dt.time() for dt in data.datetime])
data = data[(data.time>=t_beg) & (data.time<=t_end)]
data.head(3)
data = data[['week','day_of_week','tot_traffic']]
grouped = data.groupby(['week','day_of_week'],as_index=False)
grouped.tot_traffic.sum().head(8)
nice_data = grouped.tot_traffic.sum()[~grouped.tot_traffic.sum().day_of_week.isin([5,6])]
nice_data.head(7)
nice_data = nice_data.pivot(index='week', columns='day_of_week', values='tot_traffic')
nice_data.iloc[0:5]
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(figsize=(12,7))
plt.boxplot([nice_data[col] for col in nice_data.columns.tolist()], showmeans=True);
plt.xticks(nice_data.columns + 1, ['Mon','Tues','Wed','Thur','Fri']);
plt.tick_params(top='off',bottom='off');
plt.ylabel('Total Number of Entries and Exits',fontsize=16);
plt.xlabel('Day of the Week',fontsize=13);
plt.title('Entries and Exits for 25 weeks Grouped by Day of Week',fontsize=14);
[nice_data[col].mean() for col in nice_data.columns]
import numpy as np
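# draw_samples() implements the "pool then sample" idea: under the null hypothesis that
# the day of the week makes no difference, a random draw of `size` day-totals from the
# pooled weekday data should look like any single weekday's observations, so the spread
# of these sampled means shows how extreme an observed day-of-week mean must be to
# look unusual.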
def draw_samples(population,size,iters):
means = []
for _ in xrange(iters):
mean = np.mean(np.random.choice(population,size,replace=False))
means.append(mean)
return means
means = draw_samples(np.ravel(nice_data),25,10000)
fig, ax = plt.subplots(figsize=(12,7))
plt.boxplot(means,whis=[.5, 99.5]);
plt.xticks([.25,.5,.75,1.33,1.66], ['Mon','Tues','Wed','Thur','Fri']);
plt.scatter([.25,.5,.75,1.33,1.66],[nice_data[col].mean() for col in nice_data.columns])
ax.set_xlim([0, 2]);
plt.ylabel('Mean of Groups/Mean of Samples',fontsize=14);
plt.xlabel('Day of the Week/Samples',fontsize=12);
plt.title('Mean Total Traffic for each Day of Week AND Means of 10,000 Samples of size 25 from All Days');
likelihood = float(sum(1 for mean in means if mean < nice_data[4].mean())) / len(means)
print likelihood
minus_the_mean = pd.DataFrame(nice_data.values - np.array([[mean] for mean in nice_data.mean(1)]))
minus_the_mean.head()
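# resample() bootstraps one day-of-week column of deviations: it repeatedly draws
# len(sample) values *with replacement* from that day's observations and records the
# mean, giving an empirical distribution of that day's mean deviation from the
# weekly average ridership.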
def resample(sample,iters):
means = []
for _ in xrange(iters):
mean = np.mean(np.random.choice(sample,len(sample),replace=True))
means.append(mean)
return means
fig, ax = plt.subplots(figsize=(12,7));
plt.boxplot([resample(minus_the_mean[col],10000) for col in range(5)],whis=[.5, 99.5]);
plt.axhline(0,ls='--',color='black')
plt.xticks(range(1,6), ['Mon','Tues','Wed','Thur','Fri']);
plt.ylabel("Observed Ridership Minus Week's Average Ridership",fontsize=14);
plt.xlabel('Day of the Week',fontsize=12);
plt.title('Resampled Distribution of Means of Observed Ridership Minus Average Ridership Across Week');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If you use the Inspect Element feature from Google Chrome, you will see that the name of the index is inside an <h1> tag which has a class = name attribute. Let's first find all the <h1> tags on the page.
Step2: Obviously, this is not what we want (the index name). Thus, we should explicitly mention the attributes we are looking for
Step3: Now this works: we were able to find the correct tag. Let's get the text out of it.
Step4: The reason we used the index_name[0] notation is that index_name is a list (although it consists of only one element, it is still a list). The text method works only on strings/tags, so we had to explicitly select the tag from the list and then apply the text method to it.
Step5: Multiple indices (print)
Step6: Multiple indices (save to dict)
Step7: Let's pretty print the content of the dictionary.
Step8: Being nice
Step9: Usually, different websites mention in their documentation how long you need to sleep before each request. The average duration is 30 seconds.
Step10: Construct a dataframe
Step11: Transpose the dataframe
Step12: And of course, if we are dealing with pandas, we can easily save the dataframe to csv as follows.
|
<ASSISTANT_TASK:>
Python Code:
import requests
from BeautifulSoup import *
url = "http://www.bloomberg.com/quote/SPX:IND"
response = requests.get(url)
page = response.text
soup = BeautifulSoup(page)
soup.findAll('h1')
index_name = soup.findAll('h1', attrs={'class': 'name'})
print(index_name)
print(index_name[0].text)
index_value = soup.findAll('div', attrs={'class':'price'})
print(index_value[0].text)
urls = ["https://www.bloomberg.com/quote/DM1:IND",
"https://www.bloomberg.com/quote/UKX:IND",
"https://www.bloomberg.com/quote/EURUSD:CUR" ]
for url in urls:
response = requests.get(url)
page = response.text
soup = BeautifulSoup(page)
index_name = soup.find("h1",attrs={'class':'name'})
index_value = soup.find("div",attrs={'class':'price'})
print(index_name.text+": "+index_value.text+"\n")
my_data = {}
for url in urls:
response = requests.get(url)
page = response.text
soup = BeautifulSoup(page)
index_name = soup.find("h1",attrs={'class':'name'})
index_value = soup.find("div",attrs={'class':'price'})
my_data.update({index_name.text:index_value.text})
from pprint import pprint
pprint(my_data)
# This is the same code as above with 2 additional lines
import time # importing the library
my_data = {}
for url in urls:
response = requests.get(url)
page = response.text
soup = BeautifulSoup(page)
index_name = soup.find("h1",attrs={'class':'name'})
index_value = soup.find("div",attrs={'class':'price'})
my_data.update({index_name.text:index_value.text})
time.sleep(10) # asking for loop to sleep 10 seconds
import csv
from datetime import datetime
with open("index_data.csv","w") as file: # create a new file for writing purposes
writer = csv.writer(file)
for i in my_data:
writer.writerow([i,my_data[i],datetime.now()])
import pandas as pd
data = pd.DataFrame.from_dict(my_data,"index")
print(data)
data.transpose()
data.transpose().to_csv("index_dataframe.csv")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note that when we create the builder, it adds the internal RNN parameters to the model.
Step2: If our LSTM/RNN was one layer deep, y2 would be equal to the hidden state. However, since it is 2 layers deep, y2 is only the hidden state (= output) of the last layer.
Step3: The same interface that we saw until now for the LSTM also holds for the simple RNN
Step4: To summarize, when calling .add_input(x) on an RNNState what happens is that the state creates a new RNN/LSTM column, passing it
Step5: As we can see, the LSTM has two extra state expressions (one for each hidden layer) before the outputs h.
Step6: Character-level LSTM
Step7: Notice that
Step8: The model seems to learn the sentence quite well.
|
<ASSISTANT_TASK:>
Python Code:
model = Model()
NUM_LAYERS=2
INPUT_DIM=50
HIDDEN_DIM=10
builder = LSTMBuilder(NUM_LAYERS, INPUT_DIM, HIDDEN_DIM, model)
# or:
# builder = SimpleRNNBuilder(NUM_LAYERS, INPUT_DIM, HIDDEN_DIM, model)
s0 = builder.initial_state()
x1 = vecInput(INPUT_DIM)
s1=s0.add_input(x1)
y1 = s1.output()
# here, we add x1 to the RNN, and the output we get from the top is y1 (a HIDDEN_DIM-dim vector)
y1.npvalue().shape
s2=s1.add_input(x1) # we can add another input
y2=s2.output()
print s2.h()
# create a simple rnn builder
rnnbuilder=SimpleRNNBuilder(NUM_LAYERS, INPUT_DIM, HIDDEN_DIM, model)
# initialize a new graph, and a new sequence
rs0 = rnnbuilder.initial_state()
# add inputs
rs1 = rs0.add_input(x1)
ry1 = rs1.output()
print "all layers:", s1.h()
print s1.s()
rnn_h = rs1.h()
rnn_s = rs1.s()
print "RNN h:", rnn_h
print "RNN s:", rnn_s
lstm_h = s1.h()
lstm_s = s1.s()
print "LSTM h:", lstm_h
print "LSTM s:", lstm_s
s2=s1.add_input(x1)
s3=s2.add_input(x1)
s4=s3.add_input(x1)
# let's continue s3 with a new input.
s5=s3.add_input(x1)
# we now have two different sequences:
# s0,s1,s2,s3,s4
# s0,s1,s2,s3,s5
# the two sequences share parameters.
assert(s5.prev() == s3)
assert(s4.prev() == s3)
s6=s3.prev().add_input(x1)
# we now have an additional sequence:
# s0,s1,s2,s6
s6.h()
s6.s()
import random
from collections import defaultdict
from itertools import count
import sys
LAYERS = 2
INPUT_DIM = 50
HIDDEN_DIM = 50
characters = list("abcdefghijklmnopqrstuvwxyz ")
characters.append("<EOS>")
int2char = list(characters)
char2int = {c:i for i,c in enumerate(characters)}
VOCAB_SIZE = len(characters)
model = Model()
srnn = SimpleRNNBuilder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
lstm = LSTMBuilder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
model.add_lookup_parameters("lookup", (VOCAB_SIZE, INPUT_DIM))
model.add_parameters("R", (VOCAB_SIZE, HIDDEN_DIM))
model.add_parameters("bias", (VOCAB_SIZE))
# return compute loss of RNN for one sentence
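# (It unrolls the RNN over the sentence: at each position it feeds the current
# character's embedding, predicts a softmax distribution over the next character,
# and accumulates the negative log-likelihood of the true next character; the
# summed loss is what gets backpropagated.)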
def do_one_sentence(rnn, sentence):
# setup the sentence
renew_cg()
s0 = rnn.initial_state()
R = parameter(model["R"])
bias = parameter(model["bias"])
lookup = model["lookup"]
sentence = ["<EOS>"] + list(sentence) + ["<EOS>"]
sentence = [char2int[c] for c in sentence]
s = s0
loss = []
for char,next_char in zip(sentence,sentence[1:]):
s = s.add_input(lookup[char])
probs = softmax(R*s.output() + bias)
loss.append( -log(pick(probs,next_char)) )
loss = esum(loss)
return loss
# generate from model:
def generate(rnn):
def sample(probs):
rnd = random.random()
for i,p in enumerate(probs):
rnd -= p
if rnd <= 0: break
return i
# setup the sentence
renew_cg()
s0 = rnn.initial_state()
R = parameter(model["R"])
bias = parameter(model["bias"])
lookup = model["lookup"]
s = s0.add_input(lookup[char2int["<EOS>"]])
out=[]
while True:
probs = softmax(R*s.output() + bias)
probs = probs.vec_value()
next_char = sample(probs)
out.append(int2char[next_char])
if out[-1] == "<EOS>": break
s = s.add_input(lookup[next_char])
return "".join(out[:-1]) # strip the <EOS>
# train, and generate every 5 samples
def train(rnn, sentence):
trainer = SimpleSGDTrainer(model)
for i in xrange(200):
loss = do_one_sentence(rnn, sentence)
loss_value = loss.value()
loss.backward()
trainer.update()
if i % 5 == 0:
print loss_value,
print generate(rnn)
sentence = "a quick brown fox jumped over the lazy dog"
train(srnn, sentence)
sentence = "a quick brown fox jumped over the lazy dog"
train(lstm, sentence)
train(srnn, "these pretzels are making me thirsty")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise Sheet 4
Step2: Collect sentences that should be recognized as grammatical, ideally in a list.
Step3: The following function can help you analyze a series of sentences.
Step4: Now run the test for grammar1!
Step6: Exercise 2: One- and two-place verbs
Step8: Exercise 3: Arbitrarily long phrases
Step9: Homework
Step11: Also make sure that the following sentences yield NO analysis!
Step12: Exercise 5: Prepositional phrases
Step14: Note that the article form dem and the noun Tisch still need to be added as lexical rules.
|
<ASSISTANT_TASK:>
Python Code:
grammar1 = """
S -> NP VP
NP -> DET N
DET -> "der" | "die" | "das"
N -> "Mann" | "Frau" | "Buch"
VP -> V NP NP
V -> "gibt" | "schenkt"
test_sentences = [
"der Mann gibt der Frau das Buch"
]
import nltk
from IPython.display import display
import sys
def test_grammar(grammar, sentences):
cfg = nltk.CFG.fromstring(grammar)
rd_parser = nltk.RecursiveDescentParser(cfg)
for i, sent in enumerate(sentences, 1):
print("Satz {}: {}".format(i, sent))
results = rd_parser.parse(sent.split())
analyzed = False
for tree in results:
tree.pretty_print(unicodelines=True) # tree.draw() oder display(tree)
analyzed = True
if not analyzed:
print("Keine Analyse möglich", file=sys.stderr)
test_grammar(grammar1, test_sentences)
test_sentences.extend([
"der Mann schläft",
"das Buch gefällt der Frau",
"die Frau kennt das Buch"
])
grammar2 = """
S -> NP VP
NP -> DET N
DET -> "der" | "die" | "das"
N -> "Mann" | "Frau" | "Buch"
VP -> V NP NP
V -> "gibt" | "schenkt"
test_grammar(grammar2, test_sentences)
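# A possible way to extend the grammar for Exercise 2 (only a sketch under the assumption
# that verbs are split by valency; not necessarily the original solution): intransitive
# ("schläft"), two-place ("gefällt", "kennt") and three-place verbs ("gibt", "schenkt")
# each select their own VP rule.
grammar2_sketch = """
S -> NP VP
NP -> DET N
DET -> "der" | "die" | "das"
N -> "Mann" | "Frau" | "Buch"
VP -> DTV NP NP | TV NP | IV
DTV -> "gibt" | "schenkt"
TV -> "gefällt" | "kennt"
IV -> "schläft"
"""
test_grammar(grammar2_sketch, test_sentences)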
test_sentences.extend([
"die kluge schöne Frau kennt das Buch",
"der schöne kluge Mann gibt der Frau das dicke Buch",
"das dicke schöne kluge Buch schläft"
])
grammar3 = """
S -> NP VP
NP -> DET N
DET -> "der" | "die" | "das"
N -> "Mann" | "Frau" | "Buch"
VP -> V NP NP
V -> "gibt" | "schenkt"
test_grammar(grammar3, test_sentences)
test_sentences.extend([
"der Mann kennt Chomsky",
"Marie gibt Fritz das Buch"
])
negative_examples = [
"Mann gibt Frau Buch",
"Mann schläft"
]
grammar4 = """
S -> NP VP
NP -> DET N
DET -> "der" | "die" | "das"
N -> "Mann" | "Frau" | "Buch"
VP -> V NP NP
V -> "gibt" | "schenkt"
test_grammar(grammar4, test_sentences)
test_sentences.extend([
"der Mann schläft neben dem Buch",
"die Frau kennt das dicke Buch über Chomsky",
"die Frau schenkt dem Mann das Buch auf dem Tisch"
])
grammar5 = """
S -> NP VP
NP -> DET N
DET -> "der" | "die" | "das"
N -> "Mann" | "Frau" | "Buch"
VP -> V NP NP
V -> "gibt" | "schenkt"
test_grammar(grammar5, test_sentences)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: II. RNN
Step2: III. Sampling
|
<ASSISTANT_TASK:>
Python Code:
import keras
from keras.layers import Concatenate,Dense,Embedding
rnn_num_units = 64
embedding_size = 16
#Let's create layers for our recurrent network
#Note: we create layers but we don't "apply" them yet
embed_x = Embedding(n_tokens,embedding_size) # an embedding layer that converts character ids into embeddings
#a dense layer that maps input and previous state to new hidden state, [x_t,h_t]->h_t+1
get_h_next = Dense(rnn_num_units, activation="tanh")
#a dense layer that maps current hidden state to probabilities of characters [h_t+1]->P(x_t+1|h_t+1)
get_probas = Dense(n_tokens, activation="softmax")
def rnn_one_step(x_t, h_t):
'''
Recurrent neural network step that produces next state and output
given prev input and previous state.
We'll call this method repeatedly to produce the whole sequence.
'''
#convert character id into embedding
x_t_emb = embed_x(tf.reshape(x_t,[-1,1]))[:,0]
#print(tf.shape(x_t_emb)) #Tensor("Shape_16:0", shape=(2,), dtype=int32)
#print(tf.shape(h_t)) #Tensor("Shape_16:0", shape=(2,), dtype=int32)
#concatenate x embedding and previous h state
#x_and_h = Concatenate()([x_t_emb, h_t])###YOUR CODE HERE <keras.layers.merge.Concatenate object at 0x7f87e5bfc6a0>
x_and_h = tf.concat([x_t_emb, h_t], 1)
#compute next state given x_and_h
h_next = get_h_next(x_and_h)
#get probabilities for language model P(x_next|h_next)
output_probas = get_probas(h_next)
return output_probas,h_next
input_sequence = tf.placeholder('int32',(MAX_LENGTH,None))
batch_size = tf.shape(input_sequence)[1]
predicted_probas = []
h_prev = tf.zeros([batch_size,rnn_num_units]) #initial hidden state
for t in range(MAX_LENGTH): #for every time-step 't' ( each character)
x_t = input_sequence[t]
probas_next,h_next = rnn_one_step(x_t,h_prev)
h_prev = h_next
predicted_probas.append(probas_next)
predicted_probas = tf.stack(predicted_probas)
predictions_matrix = tf.reshape(predicted_probas[:-1],[-1,len(tokens)])
answers_matrix = tf.one_hot(tf.reshape(input_sequence[1:],[-1]), n_tokens)
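# predictions_matrix holds the probabilities predicted at steps 0..MAX_LENGTH-2 (each
# step predicts the *next* character), while answers_matrix one-hot encodes the actual
# characters at positions 1..MAX_LENGTH-1, so the two are aligned row by row for the
# cross-entropy below.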
from keras.objectives import categorical_crossentropy
loss = tf.reduce_mean(categorical_crossentropy(answers_matrix, predictions_matrix))
optimize = tf.train.AdamOptimizer().minimize(loss)
from IPython.display import clear_output
from random import sample
s = keras.backend.get_session()
s.run(tf.global_variables_initializer())
history = []
for i in range(5000):
batch = to_matrix(sample(names,32),max_len=MAX_LENGTH)
loss_i,_ = s.run([loss,optimize],{input_sequence:batch})
history.append(loss_i)
if (i+1)%100==0:
clear_output(True)
plt.plot(history,label='loss')
plt.legend()
plt.show()
assert np.mean(history[:10]) > np.mean(history[-10:]), "RNN didn't converge."
x_t = tf.placeholder('int32',(None,))
h_t = tf.Variable(np.zeros([1,rnn_num_units],'float32'))
next_probs,next_h = rnn_one_step(x_t,h_t)
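# h_t is a persistent TF variable rather than a placeholder: during sampling we
# repeatedly run tf.assign(h_t, next_h) so the hidden state is carried over between
# successive session.run calls, one generated character at a time.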
def generate_sample(seed_phrase=None,max_length=MAX_LENGTH):
'''
The function generates text given a phrase of length at least SEQ_LENGTH.
parameters:
The phrase is set using the variable seed_phrase
The optional input "N" is used to set the number of characters of text to predict.
'''
if seed_phrase==None:
seed_phrase=' '
else:
seed_phrase=' ' + str(seed_phrase).strip().lower()
x_sequence = [token_to_id[token] for token in seed_phrase]
s.run(tf.assign(h_t,h_t.initial_value))
#feed the seed phrase, if any
for ix in x_sequence[:-1]:
s.run(tf.assign(h_t,next_h),{x_t:[ix]})
#start generating
for _ in range(max_length-len(seed_phrase)):
x_probs,_ = s.run([next_probs,tf.assign(h_t,next_h)],{x_t:[x_sequence[-1]]})
x_sequence.append(np.random.choice(n_tokens,p=x_probs[0]))
return ''.join([tokens[ix] for ix in x_sequence])
for i in range(3):
print(str(i+1) + ". " + generate_sample())
for i in range(5):
print(str(i+1) + ". " + generate_sample())
for i in range(3):
print(str(i+1) + ". " + generate_sample("trump"))
for i in range(5):
print(str(i+1) + ". " + generate_sample("trump"))
for i in range(10):
print(str(i+1) + ". " + generate_sample("Kwapich"))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The spectrum is defined as the display or specification of the monochromatic components of the radiation considered. <a name="back_reference_3"></a><a href="#reference_3">[3]</a>
Step2: Note
Step3: Note
Step4: The sample spectral power distribution can be easily plotted against the visible spectrum
Step5: With the sample spectral power distribution defined, we can retrieve its shape
Step6: The shape returned is an instance of colour.SpectralShape class
Step7: colour.SpectralShape is used throughout Colour to define spectral dimensions and is instantiated as follows
Step8: Colour defines three convenient objects to create constant spectral power distributions
Step9: By default the shape used by colour.constant_spd, colour.zeros_spd and colour.ones_spd is the one defined by colour.DEFAULT_SPECTRAL_SHAPE attribute using the CIE 1931 2° Standard Observer shape.
Step10: A custom shape can be passed to construct a constant spectral power distribution with tailored dimensions
Step11: Often, interpolation of the spectral power distribution is needed; this is achieved with the colour.SpectralPowerDistribution.interpolate method. Depending on the wavelengths uniformity, the default interpolation method will differ. Following CIE 167
Step12: Since the sample spectral power distribution is uniform the interpolation will be using the colour.SpragueInterpolator interpolator.
Step13: Extrapolation, although dangerous, can be used to help align two spectral power distributions. CIE 015
Step14: The underlying interpolator can be swapped for any of the Colour interpolators.
Step15: The extrapolation behaviour can be changed to the Linear method instead of the default Constant method, or can even use arbitrary constant left and right values
Step16: Aligning a spectral power distribution is a convenient way to first interpolate the current data within its original bounds and then, if needed, extrapolate any missing values to match the requested shape
Step17: The colour.SpectralPowerDistribution class also supports various arithmetic operations like addition, subtraction, multiplication, division or exponentiation with numeric and array_like variables or other colour.SpectralPowerDistribution class instances
Step18: The spectral power distribution can be normalised with an arbitrary factor
Step19: Colour Matching Functions
Step20: With an RGB model of human vision based on Wright & Guild 1931 2° RGB CMFs $\bar{r}(\lambda)$,$\bar{g}(\lambda)$,$\bar{b}(\lambda)$ colour matching functions and for pragmatic reasons the CIE members developed a new colour space that would relate to the CIE RGB colourspace but for which all tristimulus values would be positive for real colours
Step21: In the 1960s, it appeared that cones were present in a larger region of the eye than the one initially covered by the experiments that led to the CIE 1931 2° Standard Observer specification.
Step22: Note
Step23: Retrieving the CIE XYZ tristimulus values of any wavelength from colour matching functions is done using the colour.wavelength_to_XYZ definition; if the value requested is not available, the colour matching functions will be interpolated following CIE 167
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import colour
from colour.plotting import *
colour.filter_warnings(True, False)
colour_plotting_defaults()
# Plotting the visible spectrum.
visible_spectrum_plot()
from pprint import pprint
import colour.colorimetry as colorimetry
pprint(colorimetry.__all__)
import colour.colorimetry.dataset as dataset
pprint(dataset.__all__)
import colour
# Defining a sample spectral power distribution data.
sample_spd_data = {
380: 0.048,
385: 0.051,
390: 0.055,
395: 0.06,
400: 0.065,
405: 0.068,
410: 0.068,
415: 0.067,
420: 0.064,
425: 0.062,
430: 0.059,
435: 0.057,
440: 0.055,
445: 0.054,
450: 0.053,
455: 0.053,
460: 0.052,
465: 0.052,
470: 0.052,
475: 0.053,
480: 0.054,
485: 0.055,
490: 0.057,
495: 0.059,
500: 0.061,
505: 0.062,
510: 0.065,
515: 0.067,
520: 0.070,
525: 0.072,
530: 0.074,
535: 0.075,
540: 0.076,
545: 0.078,
550: 0.079,
555: 0.082,
560: 0.087,
565: 0.092,
570: 0.100,
575: 0.107,
580: 0.115,
585: 0.122,
590: 0.129,
595: 0.134,
600: 0.138,
605: 0.142,
610: 0.146,
615: 0.150,
620: 0.154,
625: 0.158,
630: 0.163,
635: 0.167,
640: 0.173,
645: 0.180,
650: 0.188,
655: 0.196,
660: 0.204,
665: 0.213,
670: 0.222,
675: 0.231,
680: 0.242,
685: 0.251,
690: 0.261,
695: 0.271,
700: 0.282,
705: 0.294,
710: 0.305,
715: 0.318,
720: 0.334,
725: 0.354,
730: 0.372,
735: 0.392,
740: 0.409,
745: 0.420,
750: 0.436,
755: 0.450,
760: 0.462,
765: 0.465,
770: 0.448,
775: 0.432,
780: 0.421}
spd = colour.SpectralPowerDistribution(sample_spd_data, name='Sample')
print(spd)
# Plotting the sample spectral power distribution.
single_spd_plot(spd)
# Displaying the sample spectral power distribution shape.
print(spd.shape)
repr(spd.shape)
# Using *colour.SpectralShape* with iteration.
shape = colour.SpectralShape(start=0, end=10, interval=1)
for wavelength in shape:
print(wavelength)
# *colour.SpectralShape.range* method is providing the complete range of values.
shape = colour.SpectralShape(0, 10, 0.5)
shape.range()
# Defining a constant spectral power distribution.
constant_spd = colour.constant_spd(100)
print('"Constant Spectral Power Distribution"')
print(constant_spd.shape)
print(constant_spd[400])
# Defining a zeros filled spectral power distribution.
print('\n"Zeros Filled Spectral Power Distribution"')
zeros_spd = colour.zeros_spd()
print(zeros_spd.shape)
print(zeros_spd[400])
# Defining a ones filled spectral power distribution.
print('\n"Ones Filled Spectral Power Distribution"')
ones_spd = colour.ones_spd()
print(ones_spd.shape)
print(ones_spd[400])
print(repr(colour.DEFAULT_SPECTRAL_SHAPE))
colour.ones_spd(colour.SpectralShape(400, 700, 5))[450]
# Checking the sample spectral power distribution uniformity.
print(spd.is_uniform())
# Copying the sample spectral power distribution.
spd_copy = spd.copy()
# Interpolating the copied sample spectral power distribution.
spd_copy.interpolate(colour.SpectralShape(400, 770, 1))
spd_copy[401]
# Comparing the interpolated spectral power distribution with the original one.
multi_spd_plot([spd, spd_copy], bounding_box=[730,780, 0.1, 0.5])
# Extrapolating the copied sample spectral power distribution.
spd_copy.extrapolate(colour.SpectralShape(340, 830))
spd_copy[340], spd_copy[830]
pprint([
export for export in colour.algebra.interpolation.__all__
if 'Interpolator' in export
])
# Changing interpolator while trimming the copied spectral power distribution.
spd_copy.interpolate(
colour.SpectralShape(400, 700, 10), interpolator=colour.LinearInterpolator)
# Extrapolating the copied sample spectral power distribution with *Linear* method.
spd_copy.extrapolate(
colour.SpectralShape(340, 830),
extrapolator_args={'method': 'Linear',
'right': 0})
spd_copy[340], spd_copy[830]
# Aligning the cloned sample spectral power distribution.
# We first trim the spectral power distribution as above.
spd_copy.interpolate(colour.SpectralShape(400, 700))
spd_copy.align(colour.SpectralShape(340, 830, 5))
spd_copy[340], spd_copy[830]
spd = colour.SpectralPowerDistribution({
410: 0.25,
420: 0.50,
430: 0.75,
440: 1.0,
450: 0.75,
460: 0.50,
480: 0.25
})
print((spd.copy() + 1).values)
print((spd.copy() * 2).values)
print((spd * [0.35, 1.55, 0.75, 2.55, 0.95, 0.65, 0.15]).values)
print((spd * colour.constant_spd(2, spd.shape) * colour.constant_spd(3, spd.shape)).values)
print(spd.normalise().values)
print(spd.normalise(100).values)
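# normalise() rescales the distribution relative to its peak; the second call scales the
# values so that the maximum becomes 100.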
# Plotting *Wright & Guild 1931 2 Degree RGB CMFs* colour matching functions.
single_cmfs_plot('Wright & Guild 1931 2 Degree RGB CMFs')
# Plotting *CIE XYZ 1931 2 Degree Standard Observer* colour matching functions.
single_cmfs_plot('CIE 1931 2 Degree Standard Observer')
spd = colour.SpectralPowerDistribution(sample_spd_data, name='Sample')
cmfs = colour.STANDARD_OBSERVERS_CMFS['CIE 1931 2 Degree Standard Observer']
illuminant = colour.ILLUMINANTS_RELATIVE_SPDS['A']
# Calculating the sample spectral power distribution *CIE XYZ* tristimulus values.
colour.spectral_to_XYZ(spd, cmfs, illuminant)
import pylab
# Plotting the *CIE 1931 Chromaticity Diagram*.
# The argument *standalone=False* is passed so that the plot doesn't get displayed
# and can be used as a basis for other plots.
chromaticity_diagram_plot_CIE1931(standalone=False)
# Calculating the *xy* chromaticity coordinates.
# The output domain of *colour.spectral_to_XYZ* is [0, 100] and
# the input domain of *colour.XYZ_to_sRGB* is [0, 1].
# We need to take it in account and rescale the input *CIE XYZ* colourspace matrix.
x, y = colour.XYZ_to_xy(colour.spectral_to_XYZ(spd, cmfs, illuminant) / 100)
# Plotting the *xy* chromaticity coordinates.
pylab.plot(x, y, 'o-', color='white')
# Annotating the plot.
pylab.annotate(spd.name,
xy=(x, y),
xytext=(-50, 30),
textcoords='offset points',
arrowprops=dict(arrowstyle='->', connectionstyle='arc3, rad=-0.2'))
# Displaying the plot.
render(standalone=True)
colour.wavelength_to_XYZ(546.1, colour.STANDARD_OBSERVERS_CMFS['CIE 1931 2 Degree Standard Observer'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ipsl', 'ipsl-cm6a-lr', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
DOC.set_value("Other: sea ice [thickness, concentration, velocity, temperature, heat content], snow thickness, snow temperature")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
DOC.set_value("Ocean grid")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
DOC.set_value("Hibler 1979")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
DOC.set_value("Visco-plastic")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
DOC.set_value("Other: multi-layer on a regular vertical grid")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
DOC.set_value("Other: parametrized (calculated in ocean)")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("Ice formed with from prescribed thickness")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
DOC.set_value("Other: no")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("Snow-ice")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
DOC.set_value("Other: one layer")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
DOC.set_value("Other: fonction of temperature and sea ice + snow thickness")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In previous weeks we have covered preprocessing our data and dimensionality reduction, and last week we looked at supervised learning. This week we will be pulling these processes together into a complete project.
Step2: FeatureUnion
Step10: Advanced Pipeline
Step11: Exercises
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
%matplotlib inline
# http://scikit-learn.org/stable/auto_examples/plot_digits_pipe.html#example-plot-digits-pipe-py
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
print(estimator)
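# A small addition (not in the original example): report the hyper-parameters
# chosen by the grid search and the corresponding cross-validation score.
print(estimator.best_params_)
print(estimator.best_score_)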
# http://scikit-learn.org/stable/auto_examples/feature_stacker.html#example-feature-stacker-py
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA()
# Maybe some original features where good, too?
selection = SelectKBest()
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
import numpy as np
from sklearn.base import TransformerMixin
class ModelTransformer(TransformerMixin):
"""Wrap a classifier model so that it can be used in a pipeline."""
def __init__(self, model):
self.model = model
def fit(self, *args, **kwargs):
self.model.fit(*args, **kwargs)
return self
def transform(self, X, **transform_params):
return self.model.predict_proba(X)
def predict_proba(self, X, **transform_params):
return self.transform(X, **transform_params)
class VarTransformer(TransformerMixin):
"""Compute the variance."""
def transform(self, X, **transform_params):
var = X.var(axis=1)
return var.reshape((var.shape[0],1))
def fit(self, X, y=None, **fit_params):
return self
class MedianTransformer(TransformerMixin):
"""Compute the median."""
def transform(self, X, **transform_params):
median = np.median(X, axis=1)
return median.reshape((median.shape[0],1))
def fit(self, X, y=None, **fit_params):
return self
class ChannelExtractor(TransformerMixin):
"""Extract a single channel for downstream processing."""
def __init__(self, channel):
self.channel = channel
def transform(self, X, **transformer_params):
return X[:,:,self.channel]
def fit(self, X, y=None, **fit_params):
return self
class FFTTransformer(TransformerMixin):
"""Convert to the frequency domain and then sum over bins."""
def transform(self, X, **transformer_params):
fft = np.fft.rfft(X, axis=1)
fft = np.abs(fft)
fft = np.cumsum(fft, axis=1)
bin_size = 10
max_freq = 60
return np.column_stack([fft[:,i] - fft[:,i-bin_size]
for i in range(bin_size, max_freq, bin_size)])
def fit(self, X, y=None, **fit_params):
return self
import numpy as np
import os
import pickle
from sklearn.cross_validation import cross_val_score, StratifiedShuffleSplit
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.ensemble import RandomForestClassifier
import get_traces
import transformers as trans
def build_pipeline(X):
"""Helper function to build the pipeline of feature transformations.
We do the same thing to each channel, so rather than manually copying changes
for all channels this is automatically generated."""
channels = X.shape[2]
pipeline = Pipeline([
('features', FeatureUnion([
('select_%d_pipeline' % i,
Pipeline([('select_%d' % i, trans.ChannelExtractor(i)),
('channel features', FeatureUnion([
('var', trans.VarTransformer()),
('median', trans.MedianTransformer()),
('fft', trans.FFTTransformer()),
])),
])
) for i in range(channels)])),
('classifier', trans.ModelTransformer(RandomForestClassifier(
n_estimators=500,
max_depth=None,
min_samples_split=1,
random_state=0))),
])
return pipeline
def get_transformed_data(patient, func=get_traces.get_training_traces):
"""Load in all the data."""
X = []
channels = get_traces.get_num_traces(patient)
# Reading in 43 Gb of data . . .
for i in range(channels):
x, y = func(patient, i)
X.append(x)
return (np.dstack(X), y)
all_labels = []
all_predictions = np.array([])
folders = [i for i in os.listdir(get_traces.directory) if i[0] != '.']
folders.sort()
for folder in folders:
print('Starting %s' % folder)
print('getting data')
X, y = get_transformed_data(folder)
print(X.shape)
print('stratifiedshufflesplit')
cv = StratifiedShuffleSplit(y,
n_iter=5,
test_size=0.2,
random_state=0,)
print('cross_val_score')
pipeline = build_pipeline(X)
# Putting this in a list is unnecessary for just one pipeline - use to compare multiple pipelines
scores = [
cross_val_score(pipeline, X, y, cv=cv, scoring='roc_auc')
]
print('displaying results')
for score, label in zip(scores, ['pipeline',]):
print("AUC: {:.2%} (+/- {:.2%}), {:}".format(score.mean(),
score.std(), label))
clf = pipeline
print('Fitting full model')
clf.fit(X, y)
print('Getting test data')
testing_data, files = get_transformed_data(folder,
get_traces.get_testing_traces)
print('Generating predictions')
predictions = clf.predict_proba(testing_data)
print(predictions.shape, len(files))
with open('%s_randomforest_predictions.pkl' % folder, 'wb') as f:
pickle.dump((files, predictions[:,1]), f)
from sklearn import grid_search
from sklearn import datasets
from sklearn import neighbors
from sklearn import metrics
from sklearn.pipeline import Pipeline
from sklearn import grid_search
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
np.random.seed(0)
split = np.random.random(y.shape) > 0.3
X_train = X[split]
y_train = y[split]
X_test = X[np.logical_not(split)]
y_test = y[np.logical_not(split)]
print(X_train.shape, y_train.shape)
knn = neighbors.KNeighborsRegressor()
pipe = Pipeline(steps=[('knn', knn)])
parameters = [1,2,3,4,5,6,7,8,9,10]
grid = grid_search.GridSearchCV(pipe, dict(knn__n_neighbors=parameters))
grid.fit(X_train, y_train)
plt.plot(y_test, grid.predict(X_test), 'k.')
plt.show()
print(metrics.mean_squared_error(y_test, grid.predict(X_test)))
grid.get_params()
best=grid.best_estimator_.named_steps['knn'].n_neighbors
print('optimal number of neighbors:', best)
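# A short illustration of the 'step__parameter' naming convention used by the pipelines
# above: once the grid search has picked the best n_neighbors, the same double-underscore
# syntax can set it directly on the pipeline before refitting. This is a minimal sketch
# added for illustration (it reuses pipe, X_train, y_train, X_test, y_test defined above
# and is not part of the original exercise).
pipe.set_params(knn__n_neighbors=best)
pipe.fit(X_train, y_train)
print(metrics.mean_squared_error(y_test, pipe.predict(X_test)))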
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Test matrix
Step2: Run it and take the first two rows to check that the algorithm actually works
Step3: The file is saved to $HOMEHDFS/matriz5
Step4: Matrix multiplication implementation
Step5: Function to split and clean the numbers from the file. It receives each line, splits it on spaces, and then strips ",", "[" and "]"
|
<ASSISTANT_TASK:>
Python Code:
import random
def inicializarMatrizNueva(filas,columnas,valorMaximoNumero):
matriz = []
for i in range(filas):
fil = []
for j in range(columnas):
a = random.randrange(valorMaximoNumero)
fil.append(a)
matriz.append(fil)
matriz = sc.parallelize(matriz)
return matriz
matriz = inicializarMatrizNueva(10000,20,1000)
print(matriz.take(2))
matriz2 = inicializarMatrizNueva(20,10000,1000)
print(matriz2.take(2))
matriz.saveAsTextFile("matriz_n8")
matriz2.saveAsTextFile("matriz_n9")
matriz2=sc.textFile("matriz8")
def quitar(x):
y = x.split()
return map(lambda w: int(w.replace(",","").replace("[","").replace("]","")),y)
matriz2 = matriz2.map(quitar)
print(matriz2.take(2))
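# The description above mentions a matrix multiplication implementation, but the cells so
# far only generate, save and reload the matrices. Below is a minimal sketch of one way to
# do it (an illustration added here, not the original author's code): it assumes `matriz`
# is an RDD of rows (n x 20) and `matriz2` an RDD of rows (20 x m) small enough to collect
# and broadcast to the workers through the existing SparkContext `sc`.
B = matriz2.collect()          # 20 x m factor, kept in driver memory
B_bc = sc.broadcast(B)
def multiplicar_fila(fila):
    B_local = B_bc.value
    m = len(B_local[0])
    return [sum(fila[k] * B_local[k][j] for k in range(len(fila))) for j in range(m)]
producto = matriz.map(multiplicar_fila)
print(producto.take(2))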
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define the Test Set
Step2: Train the Model
Step3: Regression
Step4: Optimize Hyperparameters
Step5: array([ 1.47895967, 3.99711988, 0.16295754])
|
<ASSISTANT_TASK:>
Python Code:
x = np.random.RandomState(0).uniform(-5, 5, 20)
#x = np.random.uniform(-5, 5, 20)
y = x*np.sin(x)
#y += np.random.normal(0,0.5,y.size)
y += np.random.RandomState(34).normal(0,0.5,y.size)
x_star = np.linspace(-5,5,500)
#Define the basic kernels
k1 = SqExp(0.45,2)
k2 = RQ(0.5,2,3)
k3 = ExpSine(0.1,2,30)
k4 = WhiteNoise(0.01)
#Define the combined kernel
k1 = k1+k4
#Instantiate the GP predictor object with the desired kernel
gp = GPR(k1)
#Train the model
gp.train(x,y)
#Predict a new set of test data given the independent variable observations
y_mean1,y_var1 = gp.predict(x_star,False)
#Convert the variance to the standard deviation
y_err1 = np.sqrt(y_var1)
plt.scatter(x,y,s=30)
plt.plot(x_star,x_star*np.sin(x_star),'r:')
plt.plot(x_star,y_mean1,'k-')
plt.fill_between(x_star,y_mean1+y_err1,y_mean1-y_err1,alpha=0.5)
gp.optimize('SLSQP')
#Predict a new set of test data given the independent variable observations
y_mean2,y_var2 = gp.predict(x_star,False)
#Convert the variance to the standard deviation
y_err2 = np.sqrt(y_var2)
plt.scatter(x,y,s=30)
plt.plot(x_star,x_star*np.sin(x_star),'r:')
plt.plot(x_star,y_mean2,'k-')
plt.fill_between(x_star,y_mean2+y_err2,y_mean2-y_err2,alpha=0.5)
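# SqExp, RQ, ExpSine, WhiteNoise and GPR come from the author's own GP module, which is
# not shown in this notebook. Purely as a point of reference, a squared-exponential kernel
# with length scale l and amplitude sigma_f is typically
# k(x, x') = sigma_f**2 * exp(-(x - x')**2 / (2 * l**2)).
# A minimal NumPy sketch of that form (an illustration, not the module's actual code):
import numpy as np
def sq_exp_kernel(xa, xb, length_scale=0.45, sigma_f=2.0):
    d2 = (xa[:, None] - xb[None, :]) ** 2
    return sigma_f ** 2 * np.exp(-0.5 * d2 / length_scale ** 2)
K = sq_exp_kernel(x, x) + 0.01 * np.eye(x.size)  # white-noise jitter, analogous to k1 + k4
print(K.shape)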
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plotting and Analysis Functions
Step2: Data Loading
|
<ASSISTANT_TASK:>
Python Code:
# import the modules
import os
import sys
import GPy
import csv
import numpy as np
import cPickle as pickle
import scipy.stats as stats
import sklearn.metrics as metrics
from matplotlib import pyplot as plt
%matplotlib notebook
# function to compute reconstruction error
def reconstructionError(model, valData, testData, mKey, kKey, optimizeFlag=False):
nSamplesVal = valData[mKey].shape[0]
nSamplesTest = testData[mKey].shape[0]
nDimIn = valData[kKey].shape[1]
nDimOut = valData[mKey].shape[1]
qDim = model.X.mean.shape[1]
# computing reconstruction error for test1, test2 with variances
predictVal = np.zeros((nSamplesVal,nDimOut))
predictTest = np.zeros((nSamplesTest,nDimOut))
for n in range(nSamplesVal):
yIn = valData[kKey][n,:]
yTrueOut = valData[mKey][n,:]
[xPredict, infX] = model.Y0.infer_newX(yIn[None,:], optimize=False)
yOut = model.predict(xPredict.mean, Yindex=1)
sys.stdout.write('.')
predictVal[n,:] = yOut[0]
sys.stdout.write('\n')
for n in range(nSamplesTest):
yIn = testData[kKey][n,:]
yTrueOut = testData[mKey][n,:]
[xPredict, infX] = model.Y0.infer_newX(yIn[None,:], optimize=optimizeFlag)
yOut = model.predict(xPredict.mean, Yindex=1)
sys.stdout.write('.')
predictTest[n,:] = yOut[0]
sys.stdout.write('\n')
results = {}
valResults = {}
testResults = {}
valResults['pred'] = predictVal
testResults['pred'] = predictTest
valErrors = np.sqrt(metrics.mean_squared_error(valData[mKey],predictVal,multioutput='raw_values'))
testErrors = np.sqrt(metrics.mean_squared_error(testData[mKey],predictTest,multioutput='raw_values'))
valNormErrors = np.divide(np.sqrt(metrics.mean_squared_error(valData[mKey],predictVal,multioutput='raw_values')),
valData[mKey].max(axis=0) - valData[mKey].min(axis=0))
testNormErrors = np.divide(np.sqrt(metrics.mean_squared_error(testData[mKey],predictTest,multioutput='raw_values')),
testData[mKey].max(axis=0) - testData[mKey].min(axis=0))
valCorr = np.zeros((1,nDimOut))
testCorr = np.zeros((1,nDimOut))
for d in range(nDimOut):
valCorr[0,d],_ = stats.pearsonr(valData[mKey][:,d],predictVal[:,d])
testCorr[0,d],_ = stats.pearsonr(testData[mKey][:,d],predictTest[:,d])
valResults['rmse'] = valErrors
testResults['rmse'] = testErrors
valResults['nrmse'] = valNormErrors
testResults['nrmse'] = testNormErrors
valResults['corr'] = valCorr
testResults['corr'] = testCorr
results['train'] = valResults
results['test'] = testResults
return results
nShr = 4
nPos = 6
names = []
dims = [1,7500,8]
keys = ['Time','Cloud','TopCoord']
for nS in range(nShr):
for nP in range(nPos):
names.append('K1S%dP%dT1' % (nS+1,nP+1))
# create directory for results
dName = '../Results/Exp6'
if not os.path.exists(dName):
os.makedirs(dName)
# load dataset
Data = pickle.load(open('../Data/Data.p','rb'))
# loop over the kinect keys
kinectExt = 'C'
kinectDim = 7500
kinectKey = 'Cloud'
mocapDim = 8
mocapExt = 'T'
mocapKey = 'TopCoord'
keys = [kinectKey,mocapKey]
expName = '%s%s' % (kinectExt,mocapExt)
for sInd in range(nShr):
for pInd in range(nPos):
valData = {}
testData = {}
testInd = sInd*nPos+pInd
valInd = sInd*nPos+(pInd+1)%nPos
print 'Cycle:%d,%d' % (sInd+1,pInd+1)
print names[valInd],names[testInd]
for key in keys:
valData[key] = Data[names[valInd]][key]
testData[key] = Data[names[testInd]][key]
# load the trained MRD model
mrdModel = pickle.load(open('../Models/Model%d%d.p' % (sInd+1,pInd+1),'rb'))
# apply inference to test and val data
results = reconstructionError(mrdModel,valData,testData,mocapKey,kinectKey,optimizeFlag=True)
# save results to file
pickle.dump(results,open('../Results/Exp6/MRDRes%d%d.p' % (sInd+1,pInd+1),'wb'))
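# A small follow-up sketch (not part of the original script): aggregate the per-fold
# result files saved above into mean test RMSE and correlation. It assumes the dictionary
# layout produced by reconstructionError().
testRMSE = []
testCorr = []
for sInd in range(nShr):
    for pInd in range(nPos):
        res = pickle.load(open('../Results/Exp6/MRDRes%d%d.p' % (sInd+1,pInd+1),'rb'))
        testRMSE.append(res['test']['rmse'].mean())
        testCorr.append(res['test']['corr'].mean())
print 'Mean test RMSE: %.4f' % np.mean(testRMSE)
print 'Mean test correlation: %.4f' % np.mean(testCorr)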
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now for a bunch of helpers. We'll use these in a moment; skip over them for now.
Step2: Okay, let's get started.
|
<ASSISTANT_TASK:>
Python Code:
import tempfile
from typing import List
import equinox as eqx # https://github.com/patrick-kidger/equinox
import jax
import jax.numpy as jnp
import optax # https://github.com/deepmind/optax
import pysr # https://github.com/MilesCranmer/PySR
import sympy
# Note that PySR, which we use for symbolic regression, uses Julia as a backend.
# You'll need to install a recent version of Julia if you don't have one.
# (And can get funny errors if you have a too-old version of Julia already.)
# You may also need to restart Python after running `pysr.install()` the first time.
pysr.silence_julia_warning()
pysr.install(quiet=True)
def quantise(expr, quantise_to):
if isinstance(expr, sympy.Float):
return expr.func(round(float(expr) / quantise_to) * quantise_to)
elif isinstance(expr, sympy.Symbol):
return expr
else:
return expr.func(*[quantise(arg, quantise_to) for arg in expr.args])
class SymbolicFn(eqx.Module):
fn: callable
parameters: jnp.ndarray
def __call__(self, x):
# Dummy batch/unbatching. PySR assumes its JAX'd symbolic functions act on
# tensors with a single batch dimension.
return jnp.squeeze(self.fn(x[None], self.parameters))
class Stack(eqx.Module):
modules: List[eqx.Module]
def __call__(self, x):
return jnp.stack([module(x) for module in self.modules], axis=-1)
def expr_size(expr):
return sum(expr_size(v) for v in expr.args) + 1
def _replace_parameters(expr, parameters, i_ref):
if isinstance(expr, sympy.Float):
i_ref[0] += 1
return expr.func(parameters[i_ref[0]])
elif isinstance(expr, sympy.Symbol):
return expr
else:
return expr.func(
*[_replace_parameters(arg, parameters, i_ref) for arg in expr.args]
)
def replace_parameters(expr, parameters):
i_ref = [-1] # Distinctly sketchy approach to making this conversion.
return _replace_parameters(expr, parameters, i_ref)
def main(
symbolic_dataset_size=2000,
symbolic_num_populations=100,
symbolic_population_size=20,
symbolic_migration_steps=4,
symbolic_mutation_steps=30,
symbolic_descent_steps=50,
pareto_coefficient=2,
fine_tuning_steps=500,
fine_tuning_lr=3e-3,
quantise_to=0.01,
):
#
# First obtain a neural approximation to the dynamics.
# We begin by running the previous example.
#
# Runs the Neural ODE example.
# This defines the variables `ts`, `ys`, `model`.
print("Training neural differential equation.")
%run neural_ode.ipynb
#
# Now symbolically regress across the learnt vector field, to obtain a Pareto
# frontier of symbolic equations, that trades loss against complexity of the
# equation. Select the "best" from this frontier.
#
print("Symbolically regressing across the vector field.")
vector_field = model.func.mlp # noqa: F821
dataset_size, length_size, data_size = ys.shape # noqa: F821
in_ = ys.reshape(dataset_size * length_size, data_size) # noqa: F821
in_ = in_[:symbolic_dataset_size]
out = jax.vmap(vector_field)(in_)
with tempfile.TemporaryDirectory() as tempdir:
symbolic_regressor = pysr.PySRRegressor(
niterations=symbolic_migration_steps,
ncyclesperiteration=symbolic_mutation_steps,
populations=symbolic_num_populations,
npop=symbolic_population_size,
optimizer_iterations=symbolic_descent_steps,
optimizer_nrestarts=1,
procs=1,
verbosity=0,
tempdir=tempdir,
temp_equation_file=True,
output_jax_format=True,
)
symbolic_regressor.fit(in_, out)
best_equations = symbolic_regressor.get_best()
expressions = [b.sympy_format for b in best_equations]
symbolic_fns = [
SymbolicFn(b.jax_format["callable"], b.jax_format["parameters"])
for b in best_equations
]
#
# Now the constants in this expression have been optimised for regressing across
# the neural vector field. This was good enough to obtain the symbolic expression,
# but won't quite be perfect -- some of the constants will be slightly off.
#
# To fix this we now plug our symbolic function back into the original dataset
# and apply gradient descent.
#
print("Optimising symbolic expression.")
symbolic_fn = Stack(symbolic_fns)
flat, treedef = jax.tree_flatten(
model, is_leaf=lambda x: x is model.func.mlp # noqa: F821
)
flat = [symbolic_fn if f is model.func.mlp else f for f in flat] # noqa: F821
symbolic_model = jax.tree_unflatten(treedef, flat)
@eqx.filter_grad
def grad_loss(symbolic_model):
vmap_model = jax.vmap(symbolic_model, in_axes=(None, 0))
pred_ys = vmap_model(ts, ys[:, 0]) # noqa: F821
return jnp.mean((ys - pred_ys) ** 2) # noqa: F821
optim = optax.adam(fine_tuning_lr)
opt_state = optim.init(eqx.filter(symbolic_model, eqx.is_inexact_array))
@eqx.filter_jit
def make_step(symbolic_model, opt_state):
grads = grad_loss(symbolic_model)
updates, opt_state = optim.update(grads, opt_state)
symbolic_model = eqx.apply_updates(symbolic_model, updates)
return symbolic_model, opt_state
for _ in range(fine_tuning_steps):
symbolic_model, opt_state = make_step(symbolic_model, opt_state)
#
# Finally we round each constant to the nearest multiple of `quantise_to`.
#
trained_expressions = []
for module, expression in zip(symbolic_model.func.mlp.modules, expressions):
expression = replace_parameters(expression, module.parameters.tolist())
expression = quantise(expression, quantise_to)
trained_expressions.append(expression)
print(f"Expressions found: {trained_expressions}")
main()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Description
Step2: (a)
Step3: The base apparent power is $S_\text{base} = 100\,MVA$ , and the base line voltage on the secondary side is $V_{LS_\text{base}} = 115\,kV$ , so the base value of the secondary line current is
Step4: so the per-unit secondary current is
Step5: The per-unit phasor diagram is shown below
Step6: and the voltage regulation is
Step7: (c)
Step8: The base impedance of the transformer referred to the low-voltage side is
Step9: Each per-unit impedance is converted to actual ohms referred to the low-voltage side by multiplying it by this base impedance.
Step10: (d)
Step11: and the actual losses in the series resistance are
Step12: The per-unit losses in the excitation branch are
Step13: and the actual losses in the excitation branch are
Step14: The per-unit power supplied to the load
Step15: Therefore, the transformer’s efficiency is
|
<ASSISTANT_TASK:>
Python Code:
%pylab notebook
Sbase = 100e6 # [VA]
Vp0 = 230e3 # [V]
Vs0 = 115e3 # [V]
Rc_pu = 100.0
Xm_pu = 20.0
Req_pu = 0.015
Xeq_pu = 0.06
Sload = 80e6 # [VA]
PF = 0.8
Vls_a = Vs0
Ils_a = Sload / (sqrt(3)*Vls_a)
print('Ils_a = {:.0f} A'.format(Ils_a))
Vls_base = Vs0
Ils_base_a = Sbase / (sqrt(3)*Vls_base)
print('Ils_base_a = {:.0f} A'.format(Ils_base_a))
Ils_pu_a = Ils_a / Ils_base_a
ILS_pu_a_angle = -arccos(PF)
ILS_pu_a = Ils_pu_a * (cos(ILS_pu_a_angle) + sin(ILS_pu_a_angle)*1j)
print('ILS_pu_a = {:.1f} ∠{:.2f}°'.format(
abs(ILS_pu_a), ILS_pu_a_angle/pi*180))
VS_pu = 1.0
Zeq_pu = Req_pu + Xeq_pu*1j
VP_pu = VS_pu + ILS_pu_a*Zeq_pu
VP_pu_angle = arctan(VP_pu.imag/VP_pu.real)
print('VP_pu = {:.3f} ∠{:.1f}°'.format(
abs(VP_pu), VP_pu_angle/pi*180))
VR = (abs(VP_pu) - abs(VS_pu)) / abs(VS_pu) * 100
print('''
VR = {:.1f}%
=========
'''.format(VR))
Vphis_base = Vls_base / sqrt(3)
print('Vphi_base = {:.1f} kV'.format(Vphis_base/1000))
Zbase = 3 * Vphis_base**2 / Sbase
print('Zbase = {:.0f} Ω'.format(Zbase))
Req = Req_pu*Zbase
Xeq = Xeq_pu*Zbase
Rc = Rc_pu*Zbase
Xm = Xm_pu*Zbase
print('''
Req,s = {:.2f} Ω Xeq,s = {:.2f} Ω
Rc,s = {:.1f} kΩ Xm,s = {:.2f} kΩ
==================================
'''.format(Req, Xeq, Rc/1000, Xm/1000))
Peq_pu = abs(ILS_pu_a)**2 *Req_pu
print('Peq_pu = {:.4f}'.format(Peq_pu))
Peq = Sbase * Peq_pu
print('''
Peq = {:.2f} MW
=============
'''.format(Peq/1e6))
Rex_pu = Rc_pu
Pex_pu = abs(VP_pu)**2 / Rex_pu
print('Pex_pu = {:.4f}'.format(Pex_pu))
Pex = Sbase*Pex_pu
print('''
Pex = {:.2f} MW
=============
'''.format(Pex/1e6))
Pload_pu = PF*Sload/Sbase
print('Pload_pu = {:.2f}'.format(Pload_pu))
Pin_pu= Pload_pu + Peq_pu + Pex_pu
Pout_pu = Pload_pu
eta = Pout_pu/Pin_pu * 100
print('''
η = {:.1f} %
==========
'''.format(eta))
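# Cross-check (a sketch added for illustration, not part of the original solution):
# the same efficiency computed in actual units rather than per-unit, reusing the
# series (copper) and excitation (core) losses Peq and Pex found above.
Pout = PF * Sload            # real power delivered to the load, in watts
Pin = Pout + Peq + Pex       # input power = output + losses
print('eta (actual units) = {:.1f} %'.format(Pout / Pin * 100))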
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model
Step2: Testing
Step3: Because I don't want to re-run the training & testing every time I come back to this project, we will save the result to a file.
Step4: The size of the test set is 1048 documents. Overall, the classifier has an accuracy of 84.1%.
Step5: Given the actual labels, we find that our classifier performs well for the gun labels (96% correct), okay for the mideast labels (90% correct), and the worst for "misc" labels (63% correct).
|
<ASSISTANT_TASK:>
Python Code:
import glob
import pandas as pd
samples = {
'train':{},
'test':{}
}
files = glob.glob('20news-bydate-*/talk.politics*/*')
for s in samples.keys():
for c in ['guns', 'mideast', 'misc']:
samples[s][c] = samples[s].get(c, len(filter(lambda x: s in x and c in x, files)))
print 'Number of training documents:\t', sum(samples['train'].values())
print 'Number of testing documents:\t', sum(samples['test'].values())
pd.DataFrame.from_dict(samples)
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import glob
import string
import math
import operator
def count_words(words):
wc = {}
for word in words:
wc[word] = wc.get(word, 0.0) + 1.0
return wc
table = string.maketrans("","")
stop = stopwords.words("english")
snowballstemmer = SnowballStemmer("english")
def preprocess(f):
## will need 'table', 'stop', and 'snowballstemmer' predefined
text = open(f).read().translate(table, string.punctuation).lower()
words = nltk.word_tokenize(text)
words = [i for i in words if i not in stop]
words = [snowballstemmer.stem(i) for i in words]
return words
vocab = {}
word_counts = {
"guns":{},
"mideast":{},
"misc":{}
}
priors = {
"guns":0.,
"mideast":0.,
"misc":0.
}
docs = []
for f in glob.glob('20news-bydate-train/talk.politics*/*'):
if 'guns' in f:
category = 'guns'
elif 'mideast' in f:
category = 'mideast'
else:
category = 'misc'
docs.append((category, f))
priors[category] += 1
words = preprocess(f)
counts = count_words(words)
for word, count in counts.items():
if word not in vocab:
vocab[word] = 0.0
if word not in word_counts[category]:
word_counts[category][word] = 0.0
vocab[word] += count
word_counts[category][word] += count
results = {
"guns":{
"idx":0,
"results":{0:0.0, 1:0.0, 2:0.0}
},
"mideast":{
"idx":1,
"results":{0:0.0, 1:0.0, 2:0.0}
},
"misc":{
"idx":2,
"results":{0:0.0, 1:0.0, 2:0.0}
}
}
docfail = []
prior_guns = priors["guns"] / sum(priors.values())
prior_mideast = priors["mideast"] / sum(priors.values())
prior_misc = priors["misc"] / sum(priors.values())
for new_doc in glob.glob('20news-bydate-test/talk.politics.*/*'):
if 'guns' in new_doc:
category = 'guns'
elif 'mideast' in new_doc:
category = 'mideast'
else:
category = 'misc'
words = preprocess(new_doc)
counts = count_words(words)
## To prevent computational errors, will perform operations in logspace, log(probabilities)
log_prob_guns = 0.0
log_prob_mideast = 0.0
log_prob_misc = 0.0
for w, cnt in counts.items():
## heuristic: skip words not seen before, or words < 3 letters long
if not w in vocab or len(w) <= 3:
continue
## calculate prob that the word occurs at all
p_word = vocab[w] / sum(vocab.values())
## calculate P(word|category)
p_w_given_guns = word_counts["guns"].get(w, 0.0) / sum(word_counts["guns"].values())
p_w_given_mideast = word_counts["mideast"].get(w, 0.0) / sum(word_counts["mideast"].values())
p_w_given_misc = word_counts["misc"].get(w, 0.0) / sum(word_counts["misc"].values())
if p_w_given_guns > 0:
log_prob_guns += math.log(cnt * p_w_given_guns / p_word)
if p_w_given_mideast > 0:
log_prob_mideast += math.log(cnt * p_w_given_mideast / p_word)
if p_w_given_misc > 0:
log_prob_misc += math.log(cnt * p_w_given_misc / p_word)
try:
max_index, max_value = max(enumerate([
math.exp(log_prob_guns + math.log(prior_guns)), #p_guns_given_w
math.exp(log_prob_mideast + math.log(prior_mideast)), #p_mideast_given_w
math.exp(log_prob_misc + math.log(prior_misc))]), #p_misc_given_w
key=operator.itemgetter(1))
except:
docfail.append(new_doc)
print new_doc
continue
results[category]["results"][max_index] = results[category]["results"].get(max_index, 0.0) + 1.0
## OUTPUT: documents which fail testing
print results
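## Save the results so the training/testing loop does not have to be re-run.
## This is a sketch consistent with the later cell that loads dc-results/naivebayes.json;
## it assumes the dc-results directory already exists.
import json
with open('dc-results/naivebayes.json', 'w') as f:
    json.dump(results, f)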
import json
with open('dc-results/naivebayes.json') as f:
results = json.load(f)
import pandas as pd
import numpy as np
%load_ext rpy2.ipython
%R library(ggplot2)
%R library(reshape)
from copy import deepcopy
print
r = {k:v['results'] for k,v in results.iteritems()}
df = pd.DataFrame.from_dict(r)#, orient="index")
df.index = ['predict_guns', 'predict_mideast', 'predict_misc']
dfcounts = deepcopy(df)
print dfcounts
if (sum(df.guns) != 0): df.guns = df.guns / sum(df.guns)
if (sum(df.mideast) != 0): df.mideast = df.mideast / sum(df.mideast)
if (sum(df.misc) != 0): df.misc = df.misc / sum(df.misc)
df
_total = sum(sum(dfcounts.values))
print 'Number of test samples: %d' % _total
print 'Percent of test set labelled correctly: %0.1f%%' % (sum(np.diagonal(dfcounts)) / _total * 100)
%%R -i df
df = melt(df)
colnames(df) = c("expected", "value")
df = cbind(df, classification=rep(c('guns', 'mideast', 'misc'), 3))
ggplot(df, aes(x=expected, y=value, fill=classification)) +
geom_bar(stat="identity") +
xlab("Actual label") +
ylab("Proportion")
%%R -i dfcounts
dat = cbind(expected=colSums(dfcounts), observed=rowSums(dfcounts))
dat = melt(dat)
colnames(dat) <- c("Label", "Type", "Count")
ggplot(dat, aes(x=Label, y=Count, fill=Type)) +
geom_bar(stat="identity", position="dodge")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read in an Image
Step10: Ideas for Lane Detection Pipeline
Step11: Test Images
Step12: Build a Lane Finding Pipeline
Step13: Test on Videos
Step14: Let's try the one with the solid white lane on the right first ...
Step16: Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
Step18: Improve the draw_lines() function
Step20: Writeup and Submission
|
<ASSISTANT_TASK:>
Python Code:
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
#reading in an image
image = mpimg.imread('images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimesions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
zeros = np.zeros(shape=(10, 10))
nums = np.arange(0, 10)
zeros[1:4, :] = nums
print(zeros)
import math
def grayscale(img):
Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform."""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel."""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def create_vertices(img):
"""'img' is a canny transform edge image.
Adjust our vertices here to be a trapezoid.
The top of the trapezoid should be where we first detect edges from the center looking bottom-up.
Sides of the trapezoid should extend to edges (plus buffer)."""
ysize, xsize = img.shape[0], img.shape[1]
bottom_ignore = ysize//6
ybuffer = ysize//30
xbuffer_top = xsize//50
xbuffer_bot = xbuffer_top*2
side_search_buffer = ybuffer//2
# Let's find the last white pixel's index in the center column.
# This will give us an idea of where our region should be
# We ignore a certain portion of the bottom of the screen so we get a better region top
# - This is partly because car hoods can obsure the region
center_white = img[:ysize-bottom_ignore, xsize//2] == 255
indices = np.arange(0, center_white.shape[0])
indices[~center_white] = 0
last_white_ind = np.amax(indices)
# If our first white pixel is too close to the bottom of the screen, default back to the screen center
# region_top_y = (last_white_ind if last_white_ind < 4*ysize//5 else ysize//2) + ybuffer
region_top_y = min(last_white_ind + ybuffer, ysize-1)
# Now we need to find the x-indices for the top segment of our region
# To do this we will look left and right from our center point until we find white
y_slice_top = max(region_top_y - side_search_buffer, 0)
y_slice_bot = min(region_top_y + side_search_buffer, ysize-1)
region_top_white = np.copy(img[y_slice_top:y_slice_bot, :]) == 255
indices = np.zeros_like(region_top_white, dtype='int32')
indices[:, :] = np.arange(0, xsize)
indices[~region_top_white] = 0
# Separate into right and left sides we can grab our indices easier:
# Right side min and left side max
right_side = np.copy(indices)
right_side[right_side < xsize//2] = xsize*2 # Large number because we will take min
left_side = np.copy(indices)
left_side[left_side > xsize//2] = 0
region_top_x_left = max(np.amax(left_side) - xbuffer_top, 0)
region_top_x_right = min(np.amin(right_side) + xbuffer_top, xsize)
# Now we do the same thing for the bottom
# Look left and right from the center until we hit white
indices = np.arange(0, xsize)
region_bot_white = img[ysize-bottom_ignore, :] == 255
indices[~region_bot_white] = 0
# Separate into right and left sides we can grab our indices easier:
# Right side min and left side max
right_side = np.copy(indices)
right_side[right_side < xsize//2] = xsize*2 # Large number because we will take min
left_side = np.copy(indices)
left_side[left_side > xsize//2] = 0
region_bot_x_left = max(np.amax(left_side) - xbuffer_bot, 0)
region_bot_x_right = min(np.amin(right_side) + xbuffer_bot, xsize)
# Because of our bottom_ignore, we need to extrapolate these bottom x coords to bot of screen
left_slope = ((ysize-bottom_ignore) - region_top_y)/(region_bot_x_left - region_top_x_left)
right_slope = ((ysize-bottom_ignore) - region_top_y)/(region_bot_x_right - region_top_x_right)
# Let's check these slopes we don't divide by 0 or inf
if abs(left_slope < .001):
left_slope = .001 if left_slope > 0 else -.001
if abs(right_slope < .001):
right_slope = .001 if right_slope > 0 else -.001
if abs(left_slope) > 1000:
left_slope = 1000 if left_slope > 0 else -1000
if abs(right_slope) > 1000:
right_slope = 1000 if right_slope > 0 else -1000
# b=y-mx
left_b = region_top_y - left_slope*region_top_x_left
right_b = region_top_y - right_slope*region_top_x_right
# x=(y-b)/m
region_bot_x_left = max(int((ysize-1-left_b)/left_slope), 0)
region_bot_x_right = min(int((ysize-1-right_b)/right_slope), xsize-1)
verts = [
(region_bot_x_left, ysize),
(region_top_x_left, region_top_y),
(region_top_x_right, region_top_y),
(region_bot_x_right, ysize)
]
return np.array([verts], dtype=np.int32)
def region_of_interest(img):
"""Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black."""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
verts = create_vertices(img)
cv2.fillPoly(mask, verts, ignore_mask_color)
#Let's return an image of the regioned area in lines
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
cv2.polylines(line_img, verts, isClosed=True, color=[0, 255, 0], thickness=5)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image, line_img
def draw_lines(img, lines, color=[255, 0, 0], thickness=8):
"""NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below."""
if lines is None: return lines
for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""`img` should be the output of a Canny transform.
Returns an image with hough lines drawn."""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
avg_lines = average_lines(lines, img)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
# draw_lines(line_img, lines)
draw_lines(line_img, avg_lines, color=[138,43,226])
return line_img
def average_lines(lines, img):
'''
img should be a regioned canny output
'''
if lines is None: return lines
positive_slopes = []
positive_xs = []
positive_ys = []
negative_slopes = []
negative_xs = []
negative_ys = []
min_slope = .3
max_slope = 1000
for line in lines:
for x1, y1, x2, y2 in line:
slope = (y2-y1)/(x2-x1)
if abs(slope) < min_slope or abs(slope) > max_slope: continue # Filter our slopes
# We only need one point sample and the slope to determine the line equation
positive_slopes.append(slope) if slope > 0 else negative_slopes.append(slope)
positive_xs.append(x1) if slope > 0 else negative_xs.append(x1)
positive_ys.append(y1) if slope > 0 else negative_ys.append(y1)
# We need to calculate our region_top_y from the canny image so we know where to extend our lines to
ysize, xsize = img.shape[0], img.shape[1]
XX, YY = np.meshgrid(np.arange(0, xsize), np.arange(0, ysize))
white = img == 255
YY[~white] = ysize*2 # Large number because we will take the min
region_top_y = np.amin(YY)
new_lines = []
if len(positive_slopes) > 0:
m = np.mean(positive_slopes)
avg_x = np.mean(positive_xs)
avg_y = np.mean(positive_ys)
b = avg_y - m*avg_x
# We have m and b, so with a y we can get x = (y-b)/m
x1 = int((region_top_y - b)/m)
x2 = int((ysize - b)/m)
new_lines.append([(x1, region_top_y, x2, ysize)])
if len(negative_slopes) > 0:
m = np.mean(negative_slopes)
avg_x = np.mean(negative_xs)
avg_y = np.mean(negative_ys)
b = avg_y - m*avg_x
# We have m and b, so with a y we can get x = (y-b)/m
x1 = int((region_top_y - b)/m)
x2 = int((ysize - b)/m)
new_lines.append([(x1, region_top_y, x2, ysize)])
return np.array(new_lines)
def weighted_img(initial_img, img, a=0.8, b=1., l=0.):
"""`img` is the output of hough_lines(): an image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + λ
NOTE: initial_img and img must be the same shape!"""
return cv2.addWeighted(initial_img, a, img, b, l)
def save_img(img, name):
mpimg.imsave('./images/output/{0}'.format(name if '.' in name else '{0}.png'.format(name)), img)
import os
image_names = [name for name in os.listdir("./images") if '.' in name]
image_names.sort()
print(image_names)
images = [mpimg.imread('./images/{0}'.format(name)) for name in image_names]
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images directory.
def detect_lines(img, debug=False):
ysize, xsize = img.shape[0], img.shape[1]
blur_gray = gaussian_blur(grayscale(img), kernel_size=5)
ht = 150 # First detect gradients above. Then keep between low and high if connected to high
lt = ht//3 # Leave out gradients below
canny_edges = canny(blur_gray, low_threshold=lt, high_threshold=ht)
if debug: save_img(canny_edges, 'canny_edges_{0}'.format(index))
# Our region of interest will be dynamically decided on a per-image basis
regioned_edges, region_lines = region_of_interest(canny_edges)
rho = 2
theta = 3*np.pi/180
min_line_length = xsize//16
max_line_gap = min_line_length//2
threshold = min_line_length//4
lines = hough_lines(regioned_edges, rho, theta, threshold, min_line_length, max_line_gap)
# Let's combine the hough-lines with the canny_edges to see how we did
overlayed_lines = weighted_img(img, lines)
# overlayed_lines = weighted_img(weighted_img(img, region_lines, a=1), lines)
if debug: save_img(overlayed_lines, 'overlayed_lines_{0}'.format(index))
return overlayed_lines
for index, img in enumerate(images):
print('Image:', index)
# debug = (True if index == 0 else False)
debug = True
detect_lines(img, debug)
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image where lines are drawn on lanes)
return detect_lines(image)
white_output = './videos/output/white.mp4'
clip1 = VideoFileClip("./videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
yellow_output = './videos/output/yellow.mp4'
clip2 = VideoFileClip('./videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
challenge_output = './videos/output/challenge.mp4'
clip2 = VideoFileClip('./videos/challengeShadowCurve.mp4')
challenge_clip = clip2.fl_image(process_image)
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We perform some necessary imports
Step2: Before anything else, let's produce a small sample of the results obtained with a single image.
Step3: We display the results
Step4: Once we have seen an example on a single image, we proceed to apply the Data Augmentation technique to our whole set of images.
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import fetch_lfw_people
# We import using one of the two alternatives
# The first alternative returns the images in RGB, with their
# respective three channel values
faces = fetch_lfw_people(color = True)
positive_patches = faces.images
positive_patches.shape
%matplotlib inline
from skimage.exposure import rescale_intensity
from skimage import io, data
from skimage.transform import rescale
import matplotlib.pyplot as plt
import sys
sys.path.append("../../rsc/img/imgaug")
import imgaug as ia
from imgaug import augmenters as iaa
import numpy as np
from scipy import ndimage, misc
from matplotlib import gridspec
import six
import six.moves as sm
# Necessary conversions
image = positive_patches[1]
image = np.array(image).astype(np.ubyte)
st = lambda aug: iaa.Sometimes(0.5, aug)
seq = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.Flipud(0.5),
st(iaa.Crop(percent=(0, 0.1))),
st(iaa.GaussianBlur((0, 3.0))),
st(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5)),
st(iaa.Dropout((0.0, 0.1), per_channel=0.5)),
st(iaa.Add((-10, 10), per_channel=0.5)),
st(iaa.Multiply((0.5, 1.5), per_channel=0.5)),
st(iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5)),
st(iaa.Grayscale(alpha=(0.0, 1.0), name="Grayscale")),
st(iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
translate_px={"x": (-16, 16), "y": (-16, 16)},
rotate=(-45, 45),
shear=(-16, 16),
order=[0, 1],
cval=(0, 1.0),
mode=ia.ALL
)),
st(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25))
],
random_order=True
)
grid = seq.draw_grid_v2(image, cols=8, rows=8)
# Display the resulting image
fig, axes = plt.subplots(nrows=8, ncols=8, figsize=(16, 16),
# sharex=True, sharey=True,
#subplot_kw={'adjustable':'box-forced'}
)
axes = axes.ravel()
for index in range(len(grid)):
axes[index].imshow(grid[index])
axes[index].axis('off');
fig.subplots_adjust(hspace=0.01, wspace=0.01, top=0.9, bottom=0, left=0,right=1)
plt.show()
for img_index in range(len(positive_patches)):
img = positive_patches[img_index]
# 1. Perform the necessary conversions
# so that all images can be processed
img = np.array(img).astype(np.ubyte)
# 2. Process the image
grid = seq.draw_grid_v2(img, cols=8, rows=8)
# 3. Save the image
for j in range(len(grid)):
io.imsave("../../rsc/img/imgaug/"+str(img_index)+str(j)+".jpg", grid[j])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Framework
Step2: Toy example
Step3: Define a logistic regression for debugging.
Step4: Start with batch_size = 1000 to make sure every step lowers the loss, then try stochastic version.
Step5: Train
Step6: Digit classification
Step7: One-hot encode the labels first.
Step8: Compare ReLU, ELU, LeakyReLU, SoftPlus activation functions. You would do better to pick the best optimizer params for each of them, but that is overkill for now. Use an architecture of your choice for the comparison. Write your personal opinion on the activation functions, and think about computation times too. Plot the losses from the activation-function comparison on one plot. Please find a scale (log?) where the lines are distinguishable, do not forget to name the axes, and the plot should be good-looking. 2.5 points
Step9: As we can see on the plots and in the table, ReLU gives the best result and has the lowest computational time compared to the others. But one should keep in mind that ReLU might stall the training procedure after a large number of epochs due to the effect called dead neurons. The best thing about ReLU is that it passes backward the maximum number of errors; in other words, it suffers less from the vanishing gradient problem.
Step10: Batch Normalization gives much better results. Initially, it was widely accepted that BN decreases the so-called internal covariate shift. Long story short, this term refers to the change in the distribution of layer inputs caused by updates, and it is believed that BN reduces ICS and thereby eradicates its negative impact on training.
Step11: Compared to SGD, Adam stores an exponentially decaying average of past squared gradients and keeps an exponentially decaying average of past gradients similar to momentum, which helps to achieve better results in this setup at the cost of slightly increased computation time. In general, adaptive learning strategies diagonally scale the gradient based on estimates of the curvature.
Step12: Usually, Dropout is used when a network is prone to overfitting. By randomly dropping some units, the aim of Dropout is to reduce overfitting during the training phase. Actually, this can be easily seen on the plots if one looks at the gap between validation and training loss. In the first setup w/o dropout (orange and dashed red lines) we can see that over time they get too close (even merging at some point), which potentially signals overfitting (the train loss also gets too low). In the second setup with added dropout (blue and dashed green) we can see that the gap between train and validation loss is maintained, so dropout does not allow the train loss to get so low. Also, dropout achieves slightly better accuracy on validation.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from time import time, sleep
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
import pandas as pd
from time import time
from tqdm import tqdm
# (re-)load layers
%run homework_modules.ipynb
# Generate some data
N = 500
X1 = np.random.randn(N,2) + np.array([2,2])
X2 = np.random.randn(N,2) + np.array([-2,-2])
Y = np.concatenate([np.ones(N),np.zeros(N)])[:,None]
Y = np.hstack([Y, 1-Y])
X = np.vstack([X1,X2])
plt.scatter(X[:,0],X[:,1], c = Y[:,0], edgecolors= 'none')
#net = Sequential()
#net.add(Linear(2, 2))
#net.add(LogSoftMax())
# Test something like that then
net = Sequential()
net.add(Linear(2, 4))
net.add(ReLU())
net.add(Linear(4, 2))
net.add(LogSoftMax())
criterion = ClassNLLCriterion()
print(net)
# Iptimizer params
optimizer_config = {'learning_rate' : 1e-2, 'momentum': 0.9}
optimizer_state = {}
# Looping params
n_epoch = 20
batch_size = 128
# batch generator
def get_batches(dataset, batch_size):
X, Y = dataset
n_samples = X.shape[0]
# Shuffle at the start of epoch
indices = np.arange(n_samples)
np.random.shuffle(indices)
for start in range(0, n_samples, batch_size):
end = min(start + batch_size, n_samples)
batch_idx = indices[start:end]
yield X[batch_idx], Y[batch_idx]
loss_history = []
for i in range(n_epoch):
for x_batch, y_batch in get_batches((X, Y), batch_size):
net.zeroGradParameters()
# Forward
predictions = net.forward(x_batch)
loss = criterion.forward(predictions, y_batch)
# Backward
dp = criterion.backward(predictions, y_batch)
net.backward(x_batch, dp)
# Update weights
sgd_momentum(net.getParameters(),
net.getGradParameters(),
optimizer_config,
optimizer_state)
loss_history.append(loss)
# Visualize
display.clear_output(wait=True)
plt.figure(figsize=(8, 6))
plt.title("Training loss")
plt.xlabel("#iteration")
plt.ylabel("loss")
plt.plot(loss_history, 'b')
plt.show()
print('Current loss: %f' % loss)
import mnist
X_train, y_train, X_val, y_val, X_test, y_test = mnist.load_dataset(flatten=True)
def one_hot_encode(input, classes=10):
one_hot_targets = np.eye(classes)[input]
return one_hot_targets
y_train = one_hot_encode(y_train)
y_val = one_hot_encode(y_val)
y_test = one_hot_encode(y_test)
# Looping params
n_epoch = 25
batch_size = 500
# Learning params
optimizer_config = {'learning_rate' : 1e-2, 'momentum': 0.9, 'beta1': 0.9, 'beta2': 0.9, 'epsilon': 10e-6}
def get_accuracy(y_pred, y_target):
numbers = y_pred.argmax(axis=-1)
numbers_target = y_target.argmax(axis=-1)
return np.mean(numbers == numbers_target)
def train_epoch(net, criterion, X, y, batch_size, optimizer_config, optimizer_state, optimizer):
for x_batch, y_batch in get_batches((X, y), batch_size):
net.zeroGradParameters()
# Forward
predictions = net.forward(x_batch)
# Backward
dp = criterion.backward(predictions, y_batch)
net.backward(x_batch, dp)
# Update weights
optimizer(net.getParameters(),
net.getGradParameters(),
optimizer_config,
optimizer_state)
predictions = net.forward(X)
return criterion.forward(predictions, y), get_accuracy(predictions, y)
def print_report(nets):
df = pd.DataFrame(columns=['Network', 'Training loss', 'Validation loss', 'Training accuracy', 'Validation accuracy'])
count = 0
for net in nets:
row = []
row.append(net)
row.append(round(nets[net]['train_loss'][-1], 3))
row.append(round(nets[net]['test_loss'][-1], 3))
row.append(round(nets[net]['train_acc'][-1], 3) * 100)
row.append(round(nets[net]['test_acc'][-1], 3) * 100)
df.loc[count] = row
count+=1
return df
np.random.seed(101)
# Define the net with ReLU
net_ReLU = Sequential()
net_ReLU.add(Linear(784, 100))
net_ReLU.add(ReLU())
net_ReLU.add(Linear(100, 200))
net_ReLU.add(ReLU())
net_ReLU.add(Linear(200, 10))
net_ReLU.add(LogSoftMax())
# Define the net with ELU
net_ELU = Sequential()
net_ELU.add(Linear(784, 100))
net_ELU.add(ELU())
net_ELU.add(Linear(100, 200))
net_ELU.add(ELU())
net_ELU.add(Linear(200, 10))
net_ELU.add(LogSoftMax())
# Define the net with LeakyReLU
net_LeakyReLU = Sequential()
net_LeakyReLU.add(Linear(784, 100))
net_LeakyReLU.add(LeakyReLU())
net_LeakyReLU.add(Linear(100, 200))
net_LeakyReLU.add(LeakyReLU())
net_LeakyReLU.add(Linear(200, 10))
net_LeakyReLU.add(LogSoftMax())
# Define the net with SoftPlus
net_SoftPlus = Sequential()
net_SoftPlus.add(Linear(784, 100))
net_SoftPlus.add(SoftPlus())
net_SoftPlus.add(Linear(100, 200))
net_SoftPlus.add(SoftPlus())
net_SoftPlus.add(Linear(200, 10))
net_SoftPlus.add(LogSoftMax())
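# For reference only -- the element-wise forms of the four activations compared here, sketched
# in NumPy (the alpha/slope defaults are assumptions, not read from homework_modules):
relu_ref = lambda x: np.maximum(x, 0)
elu_ref = lambda x, alpha=1.0: np.where(x > 0, x, alpha * (np.exp(x) - 1))
leaky_relu_ref = lambda x, slope=0.03: np.where(x > 0, x, slope * x)
softplus_ref = lambda x: np.log1p(np.exp(x))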
nets = {'ReLU' : {'net' : net_ReLU,
'criterion' : ClassNLLCriterion(),
'opt_state' : {},
'train_loss': [],
'test_loss' : [],
'train_acc': [],
'test_acc' : [],
'time': 0,
'optimizer' : sgd_momentum},
'ELU': {'net' : net_ELU,
'criterion' : ClassNLLCriterion(),
'opt_state' : {},
'train_loss': [],
'test_loss' : [],
'train_acc': [],
'test_acc' : [],
'time' : 0,
'optimizer' : sgd_momentum},
'LeakyReLU': {'net' : net_LeakyReLU,
'criterion' : ClassNLLCriterion(),
'opt_state' : {},
'train_loss': [],
'test_loss' : [],
'train_acc': [],
'test_acc' : [],
'time' : 0,
'optimizer': sgd_momentum},
'SoftPlus': {'net' : net_SoftPlus,
'criterion' : ClassNLLCriterion(),
'opt_state' : {},
'train_loss': [],
'test_loss' : [],
'train_acc': [],
'test_acc' : [],
'time' : 0,
'optimizer' : sgd_momentum}}
np.random.seed(101)
# Train the net
for i in tqdm(range(n_epoch)):
for net in nets:
time_start = time()
loss, acc = train_epoch(nets[net]['net'], nets[net]['criterion'], X_train, y_train,
batch_size, optimizer_config, nets[net]['opt_state'],
nets[net]['optimizer'])
time_end = time()
nets[net]['time'] += (time_end - time_start)
nets[net]['train_loss'].append(loss)
nets[net]['train_acc'].append(acc)
predictions_test = nets[net]['net'].forward(X_test)
nets[net]['test_loss'].append(nets[net]['criterion'].forward(predictions_test, y_test))
nets[net]['test_acc'].append(get_accuracy(predictions_test, y_test))
display.clear_output(wait=True)
# Plot training loss
plt.figure(figsize=(14, 7))
plt.title("Training loss")
plt.xlabel("Number of epoch")
plt.ylabel("Loss")
for net in nets:
plt.semilogy(nets[net]['train_loss'],label=net)
plt.legend(loc='best')
plt.grid()
plt.show()
# Plot validation loss
plt.figure(figsize=(14, 7))
plt.title("Validation loss")
plt.xlabel("Number of epoch")
plt.ylabel("Loss")
for net in nets:
plt.semilogy(nets[net]['test_loss'],label=net)
plt.legend(loc='best')
plt.grid()
plt.show()
# Plot training accuracy
plt.figure(figsize=(14, 7))
plt.title("Training accuracy")
plt.xlabel("Number of epoch")
plt.ylabel("Accuracy")
for net in nets:
plt.plot(nets[net]['train_acc'],label=net)
plt.legend(loc='best')
plt.grid()
plt.show()
# Plot testing accuracy
plt.figure(figsize=(14, 7))
plt.title("Validation accuracy")
plt.xlabel("Number of epoch")
plt.ylabel("Accuracy")
for net in nets:
plt.plot(nets[net]['test_acc'],label=net)
plt.legend(loc='best')
plt.grid()
plt.show()
print("Time spent on training:")
for net in nets:
print(net, round(nets[net]['time'], 2), "s")
print_report(nets)
np.random.seed(101)
n_epoch = 30
optimizer_config = {'learning_rate' : 1e-2, 'momentum': 0.9, 'beta1': 0.9, 'beta2': 0.9, 'epsilon': 10e-6}
# Define the net with BN
net_BN = Sequential()
net_BN.add(Linear(784, 100))
net_BN.add(BatchNormalization(alpha=0.1))
net_BN.add(ChannelwiseScaling(100))
net_BN.add(ReLU())
net_BN.add(Linear(100, 200))
net_BN.add(ReLU())
net_BN.add(Linear(200, 10))
net_BN.add(LogSoftMax())
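# Rough NumPy sketch of what the BatchNormalization + ChannelwiseScaling pair above computes
# per feature at train time (the eps/gamma/beta values here are illustrative only):
x_bn_demo = np.random.randn(8, 4) * 3 + 5
x_hat_demo = (x_bn_demo - x_bn_demo.mean(0)) / np.sqrt(x_bn_demo.var(0) + 1e-5)
out_bn_demo = 1.0 * x_hat_demo + 0.0   # gamma * x_hat + beta
print(out_bn_demo.mean(0).round(3), out_bn_demo.std(0).round(3))   # ~0 mean, ~1 std per feature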
# Define the net without BN
net_woBN = Sequential()
net_woBN.add(Linear(784, 100))
net_woBN.add(ReLU())
net_woBN.add(Linear(100, 200))
net_woBN.add(ReLU())
net_woBN.add(Linear(200, 10))
net_woBN.add(LogSoftMax())
nets = {'w/ Batch Normalization' : {'net' : net_BN,
'criterion' : ClassNLLCriterion(),
'opt_state' : {},
'train_loss': [],
'test_loss' : [],
'train_acc': [],
'test_acc' : [],
'time': 0,
'optimizer' : sgd_momentum},
'w/o Batch Normalization': {'net' : net_woBN,
'criterion' : ClassNLLCriterion(),
'opt_state' : {},
'train_loss': [],
'test_loss' : [],
'train_acc': [],
'test_acc' : [],
'time' : 0,
'optimizer' : sgd_momentum}}
np.random.seed(101)
# Train the net
for i in tqdm(range(n_epoch)):
for net in nets:
nets[net]['net'].train()
time_start = time()
loss, acc = train_epoch(nets[net]['net'], nets[net]['criterion'], X_train, y_train,
batch_size, optimizer_config, nets[net]['opt_state'],
nets[net]['optimizer'])
time_end = time()
nets[net]['net'].evaluate()
nets[net]['time'] += (time_end - time_start)
nets[net]['train_loss'].append(loss)
nets[net]['train_acc'].append(acc)
predictions_test = nets[net]['net'].forward(X_test)
nets[net]['test_loss'].append(nets[net]['criterion'].forward(predictions_test, y_test))
nets[net]['test_acc'].append(get_accuracy(predictions_test, y_test))
display.clear_output(wait=True)
# Plot training loss
plt.figure(figsize=(14, 7))
plt.title("Training loss")
plt.xlabel("Number of epoch")
plt.ylabel("Loss")
for net in nets:
plt.semilogy(nets[net]['train_loss'],label=net)
plt.legend(loc='best')
plt.grid()
plt.show()
# Plot validation loss
plt.figure(figsize=(14, 7))
plt.title("Validation loss")
plt.xlabel("Number of epoch")
plt.ylabel("Loss")
for net in nets:
plt.semilogy(nets[net]['test_loss'],label=net)
plt.legend(loc='best')
plt.grid()
plt.show()
# Plot training accuracy
plt.figure(figsize=(14, 7))
plt.title("Training accuracy")
plt.xlabel("Number of epoch")
plt.ylabel("Accuracy")
for net in nets:
plt.plot(nets[net]['train_acc'],label=net)
plt.legend(loc='best')
plt.grid()
plt.show()
# Plot testing accuracy
plt.figure(figsize=(14, 7))
plt.title("Validation accuracy")
plt.xlabel("Number of epoch")
plt.ylabel("Accuracy")
for net in nets:
plt.plot(nets[net]['test_acc'],label=net)
plt.legend(loc='best')
plt.grid()
plt.show()
print("Time spent on training:")
for net in nets:
print(net, round(nets[net]['time'], 2), "s")
print_report(nets)
np.random.seed(101)
optimizer_config = {'learning_rate' : 1e-3, 'momentum': 0.9, 'beta1': 0.9, 'beta2': 0.99, 'epsilon': 10e-6}
# Define the net
net_SGB = Sequential()
net_SGB.add(Linear(784, 100))
net_SGB.add(ReLU())
net_SGB.add(Linear(100, 200))
net_SGB.add(ReLU())
net_SGB.add(Linear(200, 10))
net_SGB.add(LogSoftMax())
net_Adam = Sequential()
net_Adam.add(Linear(784, 100))
net_Adam.add(ReLU())
net_Adam.add(Linear(100, 200))
net_Adam.add(ReLU())
net_Adam.add(Linear(200, 10))
net_Adam.add(LogSoftMax())
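# One Adam step on a toy parameter vector, for reference only (the real update below comes from
# the homework's adam_optimizer; the beta/lr/eps values here are illustrative):
w_d, g_d = np.zeros(3), np.array([0.1, -0.2, 0.3])
m_d, v_d, t_d = np.zeros(3), np.zeros(3), 1
b1_d, b2_d, lr_d, eps_d = 0.9, 0.99, 1e-3, 1e-8
m_d = b1_d * m_d + (1 - b1_d) * g_d              # decaying average of gradients (momentum-like)
v_d = b2_d * v_d + (1 - b2_d) * g_d ** 2         # decaying average of squared gradients
m_hat_d = m_d / (1 - b1_d ** t_d)                # bias correction
v_hat_d = v_d / (1 - b2_d ** t_d)
w_d = w_d - lr_d * m_hat_d / (np.sqrt(v_hat_d) + eps_d)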
nets = {'SGD Momentum' : {'net' : net_SGB,
'criterion' : ClassNLLCriterion(),
'opt_state' : {},
'train_loss': [],
'test_loss' : [],
'train_acc': [],
'test_acc' : [],
'time': 0,
'optimizer' : sgd_momentum},
'Adam optimizer': {'net' : net_Adam,
'criterion' : ClassNLLCriterion(),
'opt_state' : {},
'train_loss': [],
'test_loss' : [],
'train_acc': [],
'test_acc' : [],
'time' : 0,
'optimizer' : adam_optimizer}}
np.random.seed(101)
# Train the net
for i in tqdm(range(n_epoch)):
for net in nets:
nets[net]['net'].train()
time_start = time()
loss, acc = train_epoch(nets[net]['net'], nets[net]['criterion'], X_train, y_train,
batch_size, optimizer_config, nets[net]['opt_state'],
nets[net]['optimizer'])
time_end = time()
nets[net]['net'].evaluate()
nets[net]['time'] += (time_end - time_start)
nets[net]['train_loss'].append(loss)
nets[net]['train_acc'].append(acc)
predictions_test = nets[net]['net'].forward(X_test)
nets[net]['test_loss'].append(nets[net]['criterion'].forward(predictions_test, y_test))
nets[net]['test_acc'].append(get_accuracy(predictions_test, y_test))
display.clear_output(wait=True)
# Plot training loss
plt.figure(figsize=(14, 7))
plt.title("Training loss")
plt.xlabel("Number of epoch")
plt.ylabel("Loss")
for net in nets:
plt.semilogy(nets[net]['train_loss'],label=net)
plt.legend(loc='best')
plt.grid()
plt.show()
# Plot validation loss
plt.figure(figsize=(14, 7))
plt.title("Validation loss")
plt.xlabel("Number of epoch")
plt.ylabel("Loss")
for net in nets:
plt.semilogy(nets[net]['test_loss'],label=net)
plt.legend(loc='best')
plt.grid()
plt.show()
# Plot training accuracy
plt.figure(figsize=(14, 7))
plt.title("Training accuracy")
plt.xlabel("Number of epoch")
plt.ylabel("Accuracy")
for net in nets:
plt.plot(nets[net]['train_acc'],label=net)
plt.legend(loc='best')
plt.grid()
plt.show()
# Plot testing accuracy
plt.figure(figsize=(14, 7))
plt.title("Validation accuracy")
plt.xlabel("Number of epoch")
plt.ylabel("Accuracy")
for net in nets:
plt.plot(nets[net]['test_acc'],label=net)
plt.legend(loc='best')
plt.grid()
plt.show()
print("Time spent on training:")
for net in nets:
print(net, round(nets[net]['time'], 2), "s")
print_report(nets)
np.random.seed(101)
batch_size = 500
n_epoch = 30
optimizer_config = {'learning_rate' : 1e-2, 'momentum': 0.9}
# Define the net
net_DROPOUT = Sequential()
net_DROPOUT.add(Linear(784, 400))
net_DROPOUT.add(Dropout(0.5))
net_DROPOUT.add(ReLU())
net_DROPOUT.add(Linear(400, 10))
net_DROPOUT.add(LogSoftMax())
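# Inverted-dropout forward pass in two lines of NumPy, for reference (p=0.5 as in Dropout(0.5)
# above; at evaluation time the mask is simply skipped, which is what switching the net to
# evaluate() mode does below):
h_demo = np.random.randn(4, 5)
drop_mask = (np.random.rand(*h_demo.shape) > 0.5) / 0.5   # zero ~half the units, rescale the rest
h_dropped = h_demo * drop_mask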
net_woDROPOUT = Sequential()
net_woDROPOUT.add(Linear(784, 500))
net_woDROPOUT.add(ReLU())
net_woDROPOUT.add(Linear(500, 10))
net_woDROPOUT.add(LogSoftMax())
nets = {'w/ Dropout' : {'net' : net_DROPOUT,
'criterion' : ClassNLLCriterion(),
'opt_state' : {},
'train_loss': [],
'test_loss' : [],
'train_acc': [],
'test_acc' : [],
'time': 0,
'optimizer' : sgd_momentum},
'w/o Dropout': {'net' : net_woDROPOUT,
'criterion' : ClassNLLCriterion(),
'opt_state' : {},
'train_loss': [],
'test_loss' : [],
'train_acc': [],
'test_acc' : [],
'time' : 0,
'optimizer' : sgd_momentum}}
np.random.seed(101)
# Train the net
for i in tqdm(range(n_epoch)):
for net in nets:
nets[net]['net'].train()
time_start = time()
loss, acc = train_epoch(nets[net]['net'], nets[net]['criterion'], X_train, y_train,
batch_size, optimizer_config, nets[net]['opt_state'],
nets[net]['optimizer'])
time_end = time()
nets[net]['net'].evaluate()
nets[net]['time'] += (time_end - time_start)
nets[net]['train_loss'].append(loss)
nets[net]['train_acc'].append(acc)
predictions_test = nets[net]['net'].forward(X_test)
nets[net]['test_loss'].append(nets[net]['criterion'].forward(predictions_test, y_test))
nets[net]['test_acc'].append(get_accuracy(predictions_test, y_test))
display.clear_output(wait=True)
# Plot training loss
plt.figure(figsize=(14, 7))
plt.title("Training and validation loss")
plt.xlabel("Number of epoch")
plt.ylabel("Loss")
for net in nets:
plt.semilogy(nets[net]['train_loss'],label=net+"(train)")
#plt.legend(loc='best')
#plt.grid()
#plt.show()
# Plot validation loss
#plt.figure(figsize=(14, 7))
#plt.title("Validation loss")
#plt.xlabel("Number of epoch")
#plt.ylabel("Loss")
for net in nets:
plt.semilogy(nets[net]['test_loss'],label=net+"(val)",ls=':')
plt.legend(loc='best')
plt.grid()
plt.show()
# Plot training accuracy
plt.figure(figsize=(14, 7))
plt.title("Training accuracy")
plt.xlabel("Number of epoch")
plt.ylabel("Accuracy")
for net in nets:
plt.plot(nets[net]['train_acc'],label=net)
plt.legend(loc='best')
plt.grid()
plt.show()
# Plot testing accuracy
plt.figure(figsize=(14, 7))
plt.title("Validation accuracy")
plt.xlabel("Number of epoch")
plt.ylabel("Accuracy")
for net in nets:
plt.plot(nets[net]['test_acc'],label=net)
plt.legend(loc='best')
plt.grid()
plt.show()
print("Time spent on training:")
for net in nets:
print(net, round(nets[net]['time'], 2), "s")
print_report(nets)
X_train, y_train, X_val, y_val, X_test, y_test = mnist.load_dataset(flatten=True)
y_train_oh = one_hot_encode(y_train)
y_val_oh = one_hot_encode(y_val)
y_test_oh = one_hot_encode(y_test)
def train_epoch_emb(net, criterion, X, y, batch_size, optimizer_config, optimizer_state, optimizer):
for x_batch, y_batch in get_batches((X, y), batch_size):
net.zeroGradParameters()
# Forward
predictions = net.forward(x_batch)
# Backward
dp = criterion.backward(predictions, y_batch)
net.backward(x_batch, dp)
# Update weights
optimizer(net.getParameters(),
net.getGradParameters(),
optimizer_config,
optimizer_state)
np.random.seed(101)
batch_size = 500
n_epoch = 25
optimizer_config = {'learning_rate' : 1e-2, 'momentum': 0.9}
# Define the net with BN
net_CC = Sequential()
net_CC.add(Linear(784, 200))
net_CC.add(ReLU())
net_CC.add(Linear(200, 10))
net_CC.add(LogSoftMax())
# Define the net without BN
net_woCC = Sequential()
net_woCC.add(Linear(784, 200))
net_woCC.add(ReLU())
net_woCC.add(Linear(200, 10))
net_woCC.add(LogSoftMax())
nets = {'w/ Contrastive criterion' : {'net' : net_CC,
'criterion' : ClassContrastiveCriterion(M=10),
'opt_state' : {},
'time': 0,
'optimizer' : sgd_momentum,
'oh' : False},
'w/o Contrastive criterion': {'net' : net_woCC,
'criterion' : ClassNLLCriterion(),
'opt_state' : {},
'time' : 0,
'oh' : True,
'optimizer' : sgd_momentum}}
np.random.seed(101)
# Train the net
for i in tqdm(range(n_epoch)):
for net in nets:
nets[net]['net'].train()
time_start = time()
if (nets[net]['oh']):
loss = train_epoch_emb(nets[net]['net'], nets[net]['criterion'], X_train, y_train_oh,
batch_size, optimizer_config, nets[net]['opt_state'],
nets[net]['optimizer'])
else:
loss = train_epoch_emb(nets[net]['net'], nets[net]['criterion'], X_train, y_train,
batch_size, optimizer_config, nets[net]['opt_state'],
nets[net]['optimizer'])
time_end = time()
nets[net]['net'].evaluate()
nets[net]['time'] += (time_end - time_start)
display.clear_output(wait=True)
print("Time spent on training:")
for net in nets:
print(net, round(nets[net]['time'], 2), "s")
nets['w/ Contrastive criterion']['net'].forward(X_val)
embeddinds_CC = nets['w/ Contrastive criterion']['net'][-2].output
nets['w/o Contrastive criterion']['net'].forward(X_val)
embeddinds = nets['w/o Contrastive criterion']['net'][-2].output
#features : 'number of examples' * 'dimensionality'
f = embeddinds_CC
import scipy.spatial
#compute pairwise distances
d = scipy.spatial.distance_matrix(f,f)
#extract distances for the 'positive pairs' (same class),
#extract distances for the 'negative pairs' (different classes),
labels = y_val
a = np.tile(labels.reshape(len(labels), 1), (1, len(labels)))
b =np.tile(labels.reshape(1, len(labels)), (len(labels), 1))
#labels
c = np.zeros((len(labels), len(labels)))
c[a==b]=1
pind = np.triu(c, k=1)
c = np.zeros((len(labels), len(labels)))
c[a!=b]=1
nind = np.triu(c, k=1)
print(np.sum(pind), np.sum(nind))
pd = d[pind == 1]
nd = d[nind == 1]
plt.figure(figsize=(10,5))
plt.title("With Contrastive loss")
hp,_,_ =plt.hist(pd, bins=100, color='green', alpha = 0.3, weights = np.ones(len(pd))/len(pd), label = 'distances for positive pairs')
hn,_,_ =plt.hist(nd, bins=100, color='red', alpha = 0.3, weights = np.ones(len(nd))/len(nd),label = 'distances for negative pairs')
plt.legend(loc='best')
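# The Bhattacharyya coefficient below measures the overlap of the two normalised histograms:
# 0 would mean perfectly separated positive/negative distance distributions, 1 identical ones,
# so lower is better for the embedding.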
bhattacharyya = np.sum(np.sqrt(hp*hn))
print("Bhattacharyya coefficient", bhattacharyya)
#features : 'number of examples' * 'dimensionality'
f = embeddinds
import scipy.spatial
#compute pairwise distances
d = scipy.spatial.distance_matrix(f,f)
#extract distances for the 'positive pairs' (same class),
#extract distances for the 'negative pairs' (different classes),
labels = y_val
a = np.tile(labels.reshape(len(labels), 1), (1, len(labels)))
b =np.tile(labels.reshape(1, len(labels)), (len(labels), 1))
#labels
c = np.zeros((len(labels), len(labels)))
c[a==b]=1
pind = np.triu(c, k=1)
c = np.zeros((len(labels), len(labels)))
c[a!=b]=1
nind = np.triu(c, k=1)
print(np.sum(pind), np.sum(nind))
pd = d[pind == 1]
nd = d[nind == 1]
plt.figure(figsize=(10,5))
plt.title("Without Contrastive loss")
hp,_,_ =plt.hist(pd, bins=100, color='green', alpha = 0.3, weights = np.ones(len(pd))/len(pd), label = 'distances for positive pairs')
hn,_,_ =plt.hist(nd, bins=100, color='red', alpha = 0.3, weights = np.ones(len(nd))/len(nd),label = 'distances for negative pairs')
plt.legend(loc='best')
bhattacharyya = np.sum(np.sqrt(hp*hn))
print("Bhattacharyya coefficient", bhattacharyya)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Extract Data
Step2: 3. Select Data
Step3: Afterwards, we filter by the important airports (ATL, DFW, JFK, LAX and ORD)
Step4: 4. Sample Data
Step5: 5. Clean Data
Step6: Second, we delete any rows with null values
Step7: Third, we change the Actual Elapsed Time column to an integer
Step8: Finally, we clean any invalid Data - any flight that has negative time
Step9: 6. Add new Columns
Step10: We also calculate the delay column, which is one if a flight was cancelled, diverted, or delayed by more than 30 min
Step11: 7. Dummy Variables
Step12: Using the dummy variables, we create a table for the regression by concatenating all of the dummy columns and the dependent variable
Step13: 8. Split Test & Learn Datasets
Step14: And we make sure all variables are an integer for the regression
Step15: 9. 50-50 Data
Step16: And for the testing data set
Step17: 10. Regression - Delayed
Step18: 11. Regression - Predicted Total Time (Flight time + taxi)
Step19: VALIDATION AND TESTING
Step20: Validate with Test Data -- Predicted Total Time (Flight time + taxi)
Step21: Predicted Total Time Graph (Testing set is used here)
Step22: Calculate ROC -- Predicted Delay (Testing set is used here)
Step23: Finally, we graph the ROC line
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import statsmodels.api as sm
from sklearn.cross_validation import train_test_split
import math
import numpy as np
import matplotlib.pyplot as plt
gdf = pd.read_csv("./CSV/merged.csv")
df1 = gdf[['AIRLINE_ID','ORIGIN', 'DEST', 'DEP_TIME','ARR_TIME','DEP_DELAY','ARR_DELAY','CANCELLED','DIVERTED','ACTUAL_ELAPSED_TIME']]
df2 = df1.query('(ORIGIN == "ATL" or ORIGIN == "DFW" or ORIGIN == "JFK" or ORIGIN == "LAX" or ORIGIN == "ORD") and (DEST == "ATL" or DEST == "DFW" or DEST == "JFK" or DEST == "LAX" or DEST == "ORD")')
sampledf = df2.sample(n=10000)
print("Sample Size:" + str(len(sampledf.axes[0])))
sampledf["AIRLINE_ID"] = sampledf.apply(lambda row: str.strip(str(row.AIRLINE_ID)), axis=1)
sampledf["ORIGIN"] = sampledf.apply(lambda row: str.strip(str(row.ORIGIN)), axis=1)
sampledf["DEST"] = sampledf.apply(lambda row: str.strip(str(row.DEST)), axis=1)
sampledf = sampledf.dropna()
sampledf["ACTUAL_ELAPSED_TIME"] = sampledf.apply(lambda row: int(float(row.ACTUAL_ELAPSED_TIME)), axis=1)
sampledf = sampledf[sampledf.ACTUAL_ELAPSED_TIME >= 0]
sampledf["Morning"] = sampledf.apply(lambda row: 1 if(not row.CANCELLED and int(row.DEP_TIME) >= 600 and int(row.DEP_TIME) < 1200) else 0, axis=1)
sampledf["Afternoon"] = sampledf.apply(lambda row: 1 if(not row.CANCELLED and int(row.DEP_TIME) >= 1200 and int(row.DEP_TIME) < 1900) else 0, axis=1)
sampledf["Night"] = sampledf.apply(lambda row: 1 if(not row.CANCELLED and int(row.DEP_TIME) >= 1900 and int(row.DEP_TIME) < 2400) else 0, axis=1)
sampledf["Dawn"] = sampledf.apply(lambda row: 1 if(not row.CANCELLED and int(row.DEP_TIME) >= 2400 and int(row.DEP_TIME) < 600) else 0, axis=1)
sampledf["Delayed"] = sampledf.apply(lambda row: 1 if(row.CANCELLED or row.DIVERTED or row.ARR_DELAY > 30) else 0 , axis=1)
sampledf.columns
originDummy = pd.get_dummies(sampledf["ORIGIN"], prefix="ORG", drop_first=True)
destDummy = pd.get_dummies(sampledf["DEST"], prefix="DST", drop_first=True)
airlineDummy = pd.get_dummies(sampledf["AIRLINE_ID"], prefix="AIRLN", drop_first=True)
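# drop_first=True drops one level of each categorical variable, so the dummies are not
# perfectly collinear with the constant term added before fitting the OLS models below.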
dummyDf = pd.DataFrame()
dummyDf = pd.concat([originDummy,destDummy,airlineDummy,sampledf['Morning'], sampledf['Afternoon'], sampledf['Night'],sampledf['Delayed'],sampledf['ACTUAL_ELAPSED_TIME']], axis=1)
dummyDf.head()
trainingDF, testDF = train_test_split(dummyDf, test_size = 0.2)
trainingDF = trainingDF.applymap(np.int)
testDF = testDF.applymap(np.int)
trainingDFDelayed = trainingDF[trainingDF.Delayed == 1].head(500)
trainingDFNotDelayed = trainingDF[trainingDF.Delayed == 0].head(500)
allTraining = [trainingDFNotDelayed,trainingDFDelayed]
trainingDF = pd.concat(allTraining)
print("Training Size:" + str(len(trainingDF.axes[0])))
testDFDelayed = testDF[testDF.Delayed == 1].head(100)
testDFNotDelayed = testDF[testDF.Delayed == 0].head(100)
allTest = [testDFDelayed,testDFNotDelayed]
testDF = pd.concat(allTest)
print("Testing Size:" + str(len(testDF.axes[0])))
XValues = sm.add_constant(trainingDF[trainingDF.columns.difference(['Delayed','ACTUAL_ELAPSED_TIME'])], prepend=False)
resultDelayed = sm.OLS(trainingDF['Delayed'], XValues).fit()
print(resultDelayed.summary())
XValues = sm.add_constant(trainingDF[trainingDF.columns.difference(['Delayed','ACTUAL_ELAPSED_TIME'])])
resultTime = sm.OLS(trainingDF['ACTUAL_ELAPSED_TIME'], XValues ).fit()
print(resultTime.summary())
#Copy of the testing data set
validateDataDelay = testDF.copy()
#Get a subset of the data without the validation data
subsetPredictDelay = validateDataDelay[validateDataDelay.columns.difference(['Delayed','ACTUAL_ELAPSED_TIME'])]
#Include the constant column so the fitted intercept is part of the prediction (mirrors the time model below)
subsetPredictDelay["const"] = 1
#Predict the outcome with the regression and put the result in a new column
subsetPredictDelay['Calculated_Delay'] = subsetPredictDelay.apply(lambda row: (row * resultDelayed.params).sum(),axis=1)
#Add the real outcome in a new column
subsetPredictDelay["Real_Delayed"] = testDF["Delayed"]
subsetPredictDelay.head()[["Real_Delayed","Calculated_Delay"]]
#Copy of the testing data set
validateDataTime = testDF.copy()
subsetPredictTime = validateDataTime[validateDataTime.columns.difference(['Delayed','ACTUAL_ELAPSED_TIME'])]
subsetPredictTime["const"] = 1
subsetPredictTime['Calculated'] = subsetPredictTime.apply(lambda row: (row * resultTime.params).sum(),axis=1)
subsetPredictTime["ACTUAL_ELAPSED_TIME"] = validateDataTime["ACTUAL_ELAPSED_TIME"]
subsetPredictTime["Difference"] = subsetPredictTime.apply(lambda row: abs(row.ACTUAL_ELAPSED_TIME - row.Calculated), axis=1)
subsetPredictTime.head()[["ACTUAL_ELAPSED_TIME","Calculated","Difference"]]
#Create dataframe with the difference ranges
roicTime = pd.DataFrame({"Values":range(int(subsetPredictTime["Difference"].min()),int(subsetPredictTime["Difference"].max()),10)})
roicTime["Percentage"] = roicTime.apply(lambda row: len(subsetPredictTime[subsetPredictTime.Difference < row.Values]["Difference"]) / len(subsetPredictTime["Difference"]) * 100, axis=1 )
plt.plot(roicTime.Values,roicTime.Percentage)
plt.xlabel("Time Difference between actual and predicted (min)")
plt.ylabel("% of Data covered")
plt.title('Predict Time Validation Graph')
plt.show()
roicDelay = pd.DataFrame({"Values": np.arange(subsetPredictDelay["Calculated_Delay"].min(),subsetPredictDelay["Calculated_Delay"].max(),0.1)})
#True Positive
roicDelay["T_P"] = roicDelay.apply(lambda row:len(subsetPredictDelay[(subsetPredictDelay.Calculated_Delay > row.Values) & (subsetPredictDelay.Real_Delayed == 1)]),axis=1)
#False Positive
roicDelay["F_P"] = roicDelay.apply(lambda row:len(subsetPredictDelay[(subsetPredictDelay.Calculated_Delay > row.Values) & (subsetPredictDelay.Real_Delayed == 0)]),axis=1)
#True Negative
roicDelay["T_N"] = roicDelay.apply(lambda row:len(subsetPredictDelay[(subsetPredictDelay.Calculated_Delay < row.Values) & (subsetPredictDelay.Real_Delayed == 0)]),axis=1)
#False Negative
roicDelay["F_N"] = roicDelay.apply(lambda row:len(subsetPredictDelay[(subsetPredictDelay.Calculated_Delay < row.Values) & (subsetPredictDelay.Real_Delayed == 1)]),axis=1)
roicDelay
#False Positive Rate
roicDelay["F_P_R"] = roicDelay.apply(lambda row: row["F_P"]/(row["F_P"] + row["T_N"]),axis=1)
#Recall (True Positive Rate)
roicDelay["Recall"] = roicDelay.apply(lambda row: row["T_P"]/(row["T_P"] + row["F_N"]),axis=1)
plt.plot(roicDelay["F_P_R"],roicDelay["Recall"] )
plt.xlabel("False Positive Rate")
plt.ylabel("Recall")
plt.title('ROC Chart')
plt.show()
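# A single-number summary of the ROC curve above, via the trapezoidal rule
# ('roc_auc' is a name introduced here; the rows are reversed so FPR is increasing):
roc_auc = np.trapz(roicDelay["Recall"][::-1], roicDelay["F_P_R"][::-1])
print("Approximate AUC:", round(roc_auc, 3))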
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What's this TensorFlow business?
Step2: Example Model
Step3: TensorFlow supports many other layer types, loss functions, and optimizers - you will experiment with these next. Here's the official API documentation for these (if any of the parameters used above were unclear, this resource will also be helpful).
Step4: Training a specific model
Step5: To make sure you're doing the right thing, use the following tool to check the dimensionality of your output (it should be 64 x 10, since our batches have size 64 and the output of the final affine layer should be 10, corresponding to our 10 classes)
Step6: You should see the following from the run above
Step7: You should observe that even a simple forward pass like this is significantly faster on the GPU. So for the rest of the assignment (and when you go train your models in assignment 3 and your project!), you should use GPU devices. However, with TensorFlow, the default device is a GPU if one is available, and a CPU otherwise, so we can skip the device specification from now on.
Step8: Train the model
Step9: Check the accuracy of the model.
Step10: Train a great model on CIFAR-10!
Step11: Describe what you did here
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import numpy as np
import math
import timeit
import matplotlib.pyplot as plt
%matplotlib inline
from cs231n.data_utils import load_CIFAR10
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=10000):
Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
it for the two-layer neural net classifier. These are the same steps as
we used for the SVM, but condensed to a single function.
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
# Normalize the data: subtract the mean image
mean_image = np.mean(X_train, axis=0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
return X_train, y_train, X_val, y_val, X_test, y_test
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# clear old variables
tf.reset_default_graph()
# setup input (e.g. the data that changes every batch)
# The first dim is None, and gets set automatically based on the batch size fed in
X = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.int64, [None])
is_training = tf.placeholder(tf.bool)
def simple_model(X,y):
# define our weights (e.g. init_two_layer_convnet)
# setup variables
Wconv1 = tf.get_variable("Wconv1", shape=[7, 7, 3, 32])
bconv1 = tf.get_variable("bconv1", shape=[32])
W1 = tf.get_variable("W1", shape=[5408, 10])
b1 = tf.get_variable("b1", shape=[10])
# define our graph (e.g. two_layer_convnet)
a1 = tf.nn.conv2d(X, Wconv1, strides=[1,2,2,1], padding='VALID') + bconv1
h1 = tf.nn.relu(a1)
h1_flat = tf.reshape(h1,[-1,5408])
y_out = tf.matmul(h1_flat,W1) + b1
return y_out
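# Where does 5408 come from? A 7x7 conv with stride 2 and VALID padding on a 32x32 input gives
# (32 - 7) // 2 + 1 = 13 activations per side, and 13 * 13 * 32 filters = 5408 values per image.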
y_out = simple_model(X,y)
# define our loss
total_loss = tf.losses.hinge_loss(tf.one_hot(y,10),logits=y_out)
mean_loss = tf.reduce_mean(total_loss)
# define our optimizer
optimizer = tf.train.AdamOptimizer(5e-4) # select optimizer and set learning rate
train_step = optimizer.minimize(mean_loss)
def run_model(session, predict, loss_val, Xd, yd,
epochs=1, batch_size=64, print_every=100,
training=None, plot_losses=False):
# have tensorflow compute accuracy
correct_prediction = tf.equal(tf.argmax(predict,1), y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# shuffle indicies
train_indicies = np.arange(Xd.shape[0])
np.random.shuffle(train_indicies)
training_now = training is not None
# setting up variables we want to compute (and optimizing)
# if we have a training function, add that to things we compute
variables = [mean_loss,correct_prediction,accuracy]
if training_now:
variables[-1] = training
# counter
iter_cnt = 0
for e in range(epochs):
# keep track of losses and accuracy
correct = 0
losses = []
# make sure we iterate over the dataset once
for i in range(int(math.ceil(Xd.shape[0]/batch_size))):
# generate indicies for the batch
start_idx = (i*batch_size)%X_train.shape[0]
idx = train_indicies[start_idx:start_idx+batch_size]
# create a feed dictionary for this batch
feed_dict = {X: Xd[idx,:],
y: yd[idx],
is_training: training_now }
# get batch size
actual_batch_size = yd[i:i+batch_size].shape[0]
# have tensorflow compute loss and correct predictions
# and (if given) perform a training step
loss, corr, _ = session.run(variables,feed_dict=feed_dict)
# aggregate performance stats
losses.append(loss*actual_batch_size)
correct += np.sum(corr)
# print every now and then
if training_now and (iter_cnt % print_every) == 0:
print("Iteration {0}: with minibatch training loss = {1:.3g} and accuracy of {2:.2g}"\
.format(iter_cnt,loss,np.sum(corr)/actual_batch_size))
iter_cnt += 1
total_correct = correct/Xd.shape[0]
total_loss = np.sum(losses)/Xd.shape[0]
print("Epoch {2}, Overall loss = {0:.3g} and accuracy of {1:.3g}"\
.format(total_loss,total_correct,e+1))
if plot_losses:
plt.plot(losses)
plt.grid(True)
plt.title('Epoch {} Loss'.format(e+1))
plt.xlabel('minibatch number')
plt.ylabel('minibatch loss')
plt.show()
return total_loss,total_correct
with tf.Session() as sess:
with tf.device("/cpu:0"): #"/cpu:0" or "/gpu:0"
sess.run(tf.global_variables_initializer())
print('Training')
run_model(sess,y_out,mean_loss,X_train,y_train,1,64,100,train_step,True)
print('Validation')
run_model(sess,y_out,mean_loss,X_val,y_val,1,64)
# clear old variables
tf.reset_default_graph()
# define our input (e.g. the data that changes every batch)
# The first dim is None, and gets set automatically based on the batch size fed in
X = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.int64, [None])
is_training = tf.placeholder(tf.bool)
# define model
def complex_model(X,y,is_training):
pass
pass
y_out = complex_model(X,y,is_training)
# Now we're going to feed a random batch into the model
# and make sure the output is the right size
x = np.random.randn(64, 32, 32,3)
with tf.Session() as sess:
with tf.device("/cpu:0"): #"/cpu:0" or "/gpu:0"
tf.global_variables_initializer().run()
ans = sess.run(y_out,feed_dict={X:x,is_training:True})
%timeit sess.run(y_out,feed_dict={X:x,is_training:True})
print(ans.shape)
print(np.array_equal(ans.shape, np.array([64, 10])))
try:
with tf.Session() as sess:
with tf.device("/gpu:0") as dev: #"/cpu:0" or "/gpu:0"
tf.global_variables_initializer().run()
ans = sess.run(y_out,feed_dict={X:x,is_training:True})
%timeit sess.run(y_out,feed_dict={X:x,is_training:True})
except tf.errors.InvalidArgumentError:
print("no gpu found, please use Google Cloud if you want GPU acceleration")
# rebuild the graph
# trying to start a GPU throws an exception
# and also trashes the original graph
tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.int64, [None])
is_training = tf.placeholder(tf.bool)
y_out = complex_model(X,y,is_training)
# Inputs
# y_out: is what your model computes
# y: is your TensorFlow variable with label information
# Outputs
# mean_loss: a TensorFlow variable (scalar) with numerical loss
# optimizer: a TensorFlow optimizer
# This should be ~3 lines of code!
mean_loss = None
optimizer = None
pass
train_step = optimizer.minimize(mean_loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print('Training')
run_model(sess,y_out,mean_loss,X_train,y_train,1,64,100,train_step)
print('Validation')
run_model(sess,y_out,mean_loss,X_val,y_val,1,64)
# Feel free to play with this cell
def my_model(X,y,is_training):
pass
pass
tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.int64, [None])
is_training = tf.placeholder(tf.bool)
y_out = my_model(X,y,is_training)
mean_loss = None
optimizer = None
train_step = optimizer.minimize(mean_loss)
pass
# Feel free to play with this cell
# This default code creates a session
# and trains your model for 10 epochs
# then prints the validation set accuracy
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print('Training')
run_model(sess,y_out,mean_loss,X_train,y_train,10,64,100,train_step,True)
print('Validation')
run_model(sess,y_out,mean_loss,X_val,y_val,1,64)
# Test your model here, and make sure
# the output of this cell is the accuracy
# of your best model on the training and val sets
# We're looking for >= 70% accuracy on Validation
print('Training')
run_model(sess,y_out,mean_loss,X_train,y_train,1,64)
print('Validation')
run_model(sess,y_out,mean_loss,X_val,y_val,1,64)
print('Test')
run_model(sess,y_out,mean_loss,X_test,y_test,1,64)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: AWS (S3, Redshift, Kinesis) + Databricks Spark = Real-time Smart Meter Analytics
Step2: Copy Postgres to S3 via Postgres dump to CSV and s3cmd upload
Step3: Amazon Redshift
Step4: create table electricity (
Step7: Databricks Spark Analysis (see Databricks)
|
<ASSISTANT_TASK:>
Python Code:
# imports used throughout this section
import boto3
import botocore
import pandas as pd
import psycopg2
import seaborn as sns
s3 = boto3.client('s3')
s3.list_buckets()
def create_s3_bucket(bucketname):
"""Quick method to create bucket with exception handling"""
s3 = boto3.resource('s3')
exists = True
bucket = s3.Bucket(bucketname)
try:
s3.meta.client.head_bucket(Bucket=bucketname)
except botocore.exceptions.ClientError as e:
error_code = int(e.response['Error']['Code'])
if error_code == 404:
exists = False
if exists:
print 'Bucket {} already exists'.format(bucketname)
else:
s3.create_bucket(Bucket=bucketname, GrantFullControl='dkelly628')
create_s3_bucket('pecanstreetresearch-2016')
# Note: Used s3cmd tools because awscli tools not working in conda env
# 14m rows or ~ 1.2 GB local unzipped; 10min write to CSV and another 10min to upload to S3
# !s3cmd put ~/Users/Doug/PecanStreet/electricity-03-06-2016.csv s3://pecanstreetresearch-2016/electricity-03-06-2016.csv
# 200k rows ~ 15 MB local unzipped; 30 sec write to CSV and 15 sec upload to S3
# !s3cmd put ~/Users/Doug/PecanStreet/weather-03-06-2016.csv s3://pecanstreetresearch-2016/weather-03-06-2016.csv
# Quick geohashing before uploading to Redshift
weather_df = pd.read_csv('/Users/Doug/PecanStreet/weather_03-06-2016.csv')
weather_df.groupby(['latitude', 'longitude', 'city']).count()
# Label each reading's city from its station latitude (coordinates come from the groupby above)
weather_df['city'] = ''
weather_df.loc[weather_df.latitude == 30.292432, 'city'] = 'Austin'
weather_df.city.unique()
# weather_df['city'][weather_df.latitude==40.027278] = 'Boulder'
weather_df.to_csv('/Users/Doug/PecanStreet/weather-03-07-2016.csv', index=False)
metadata_df = pd.read_csv('/Users/Doug/PecanStreet/dataport-metadata.csv')
metadata_df = metadata_df[['dataid','city', 'state']]
metadata_df.to_csv('/Users/Doug/PecanStreet/metadata.csv', index=False)
# !s3cmd put metadata.csv s3://pecanstreetresearch-2016/metadata/metadata.csv
redshift = boto3.client('redshift')
# redshift.describe_clusters()
# psql -h pecanstreet.czxmxphrw2wv.us-east-1.redshift.amazonaws.com -U dkelly628 -d electricity -p 5439
# Complete
COPY electricity
FROM 's3://pecanstreetresearch-2016/electricity/electricity-03-06-2016.csv'
CREDENTIALS 'aws_access_key_id=AWS_ACCESS_KEY_ID;aws_secret_access_key=AWS_SECRET_ACCESS_KEY'
CSV
IGNOREHEADER 1
dateformat 'auto';
# Complete
COPY weather
FROM 's3://pecanstreetresearch-2016/weather/weather-03-06-2016.csv'
CREDENTIALS 'aws_access_key_id=AWS_ACCESS_KEY_ID;aws_secret_access_key=AWS_SECRET_ACCESS_KEY'
CSV
IGNOREHEADER 1
dateformat 'auto';
# Complete
COPY metadata
FROM 's3://pecanstreetresearch-2016/metadata/metadata.csv'
CREDENTIALS 'aws_access_key_id=AWS_ACCESS_KEY_ID;aws_secret_access_key=AWS_SECRET_ACCESS_KEY'
CSV
IGNOREHEADER 1;
# Query for checking error log; invaluable
select query, substring(filename,22,25) as filename,line_number as line,
substring(colname,0,12) as column, type, position as pos, substring(raw_line,0,30) as line_text,
substring(raw_field_value,0,15) as field_text,
substring(err_reason,0,45) as reason
from stl_load_errors
order by query desc
limit 10;
# All table definitions are stored in pg_table_def table; different from Postgres
SELECT DISTINCT tablename
FROM pg_table_def
WHERE schemaname = 'public'
ORDER BY tablename;
# Returns household, time, city, usage by hour, and temperature for all residents in Austin, TX
SELECT e.dataid, e.localhour, m.city, SUM(e.use), w.temperature
FROM electricity AS e
JOIN weather AS w ON e.localhour = w.localhour
JOIN metadata AS m ON e.dataid = m.dataid
WHERE m.city = 'Austin'
GROUP BY e.dataid, e.localhour, m.city, w.temperature;
# Returns number of participants by city, state
SELECT m.city, m.state, COUNT(e.dataid) AS participants
FROM electricity AS e
JOIN metadata AS m ON e.dataid = m.dataid
GROUP BY m.city, m.state;
# Setup connection to Pecan Street Dataport
try:
conn = psycopg2.connect("dbname='electricity' user='dkelly628' host='pecanstreet.czxmxphrw2wv.us-east-1.redshift.amazonaws.com' port='5439' password='password'")
except:
print("Error: Check there aren't any open connections in notebook or pgAdmin")
electricity_df = pd.read_sql("SELECT localhour, SUM(use) AS usage, SUM(air1) AS cooling, SUM(furnace1) AS heating, \
SUM(car1) AS electric_vehicle \
FROM electricity \
WHERE dataid = 7982 AND use > 0 \
AND localhour BETWEEN '2013-10-16 00:00:00'::timestamp AND \
'2016-02-26 08:00:00'::timestamp \
GROUP BY dataid, localhour \
ORDER BY localhour", conn)
electricity_df['localhour'] = electricity_df.localhour.apply(pd.to_datetime)
electricity_df.set_index('localhour', inplace=True)
electricity_df.fillna(value=0.0, inplace=True)
electricity_df[['usage','cooling']].plot(figsize=(18,9), title="Pecan Street Household 7982 Hourly Energy Consumption")
sns.despine();
kinesis = boto3.client('kinesis')
kinesis.create_stream(StreamName='PecanStreet', ShardCount=2)
kinesis.list_streams()
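# Example of pushing a single smart-meter reading into the stream created above; the payload
# fields and partition key are illustrative, not the actual Pecan Street schema.
import json
kinesis.put_record(StreamName='PecanStreet',
Data=json.dumps({'dataid': 7982, 'localhour': '2016-02-26 08:00:00', 'use': 1.27}).encode('utf-8'),
PartitionKey='7982')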
firehose = boto3.client('firehose')
# firehose.create_delivery_stream(DeliveryStreamName='pecanstreetfirehose', S3DestinationConfiguration={'RoleARN': '', 'BucketARN': 'pecanstreetresearch-2016'})
firehose.list_delivery_streams()
def kinesis_write(stream, ):
"""Method that writes to kinesis stream"""
kinesis = boto3.client('kinesis')
kinesis.put(StreamName=stream, )
def kinesis_read():
"""Method to read from kinesis stream"""
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Mackenzie (first name)
Step2: Regular expressions can be used to indicate if a string matches a pattern or not.
|
<ASSISTANT_TASK:>
Python Code:
m.group(2)
m.group('first_name')
import re
foo_pattern = re.compile('''
^
([A-Za-z]+)
,[ ]
([A-Za-z]+)
$
''', re.VERBOSE)
s = 'James, Mackenzie'
m = re.match(foo_pattern, s)
m
m.groups
m.group(0)
m.group(1)
m.group(2)
foo_pattern = re.compile('''
^
(?P<last_name>[A-Za-z]+)
,[ ]
(?P<first_name>[A-Za-z]+)
$
''', re.VERBOSE)
m = re.match(foo_pattern, s)
m
m.groups
m.group(0)
m.group(1)
m.group(2)
m.group('last_name')
m.group('first_name')
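# A string that does not follow the "Last, First" pattern simply returns None,
# which is how you check whether the pattern matched at all:
print(re.match(foo_pattern, 'Mackenzie James'))   # -> None (no comma separator)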
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 13.765.202 lines in train.csv
Step2: Per wikipedia, a value of more than 421 mm/h is considered "Extreme/large hail"
Step3: We regroup the data by ID
Step4: On fully filled dataset
Step5: Predicitons
Step6:
Step7: max prof 24
Step8:
Step9:
Step10:
Step11:
Step12:
Step13:
|
<ASSISTANT_TASK:>
Python Code:
# from __future__ import exam_success
from __future__ import absolute_import
from __future__ import print_function
%matplotlib inline
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import random
import pandas as pd
import scipy.stats as stats
# Sk cheats
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.cross_validation import cross_val_score # cross val
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.preprocessing import Imputer # get rid of nan
from sklearn.neighbors import KNeighborsRegressor
from sklearn import grid_search
import os
%%time
#filename = "data/train.csv"
filename = "data/reduced_train_10000.csv"
#filename = "data/reduced_train_1000000.csv"
raw = pd.read_csv(filename)
raw = raw.set_index('Id')
raw.columns
testFull.columns
raw['Expected'].describe()
# Considering that the gauge may concentrate the rainfall, we set the cap to 1000
# Comment this line to analyse the complete dataset
l = len(raw)
raw = raw[raw['Expected'] < 300] #1000
print("Dropped %d (%0.2f%%)"%(l-len(raw),(l-len(raw))/float(l)*100))
raw.head(5)
raw.describe()
# We select all features except for the minutes past,
# because we ignore the time repartition of the sequence for now
features_columns = list([u'Ref', u'Ref_5x5_10th',
u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',
u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',
u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',
u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',
u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',
u'Kdp_5x5_50th', u'Kdp_5x5_90th'])
def getXy(raw):
selected_columns = list([ u'minutes_past',u'radardist_km', u'Ref', u'Ref_5x5_10th',
u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',
u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',
u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',
u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',
u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',
u'Kdp_5x5_50th', u'Kdp_5x5_90th'])
data = raw[selected_columns]
docX, docY = [], []
for i in data.index.unique():
if isinstance(data.loc[i],pd.core.series.Series):
m = [data.loc[i].as_matrix()]
docX.append(m)
docY.append(float(raw.loc[i]["Expected"]))
else:
m = data.loc[i].as_matrix()
docX.append(m)
docY.append(float(raw.loc[i][:1]["Expected"]))
X , y = np.array(docX) , np.array(docY)
return X,y
#noAnyNan = raw.loc[raw[features_columns].dropna(how='any').index.unique()]
noAnyNan = raw.dropna()
noFullNan = raw.loc[raw[features_columns].dropna(how='all').index.unique()]
fullNan = raw.drop(raw[features_columns].dropna(how='all').index)
%%time
X,y=getXy(noAnyNan)
%%time
#XX = [np.array(t).mean(0) for t in X]
XX = [np.append(np.nanmean(np.array(t),0),(np.array(t)[1:] - np.array(t)[:-1]).sum(0) ) for t in X]
%%time
XX=[]
for t in X:
#print(idx)
tmp = np.append(np.nanmean(np.array(t),0),(np.array(t)[1:] - np.array(t)[:-1]).sum(0) )
tmp = np.append(tmp,np.percentile(t,10,axis=0))
tmp = np.append(tmp,np.percentile(t,50,axis=0))
tmp = np.append(tmp,np.percentile(t,90,axis=0))
test = t
try:
taa=test[:,0]
except TypeError:
taa=[test[0][0]]
valid_time = np.zeros_like(taa)
valid_time[0] = taa[0]
for n in xrange(1,len(taa)):
valid_time[n] = taa[n] - taa[n-1]
valid_time[-1] = valid_time[-1] + 60 - np.sum(valid_time)
valid_time = valid_time / 60.0
sum=0
try:
column_ref=test[:,2]
except TypeError:
column_ref=[test[0][2]]
for dbz, hours in zip(column_ref, valid_time):
# See: https://en.wikipedia.org/wiki/DBZ_(meteorology)
if np.isfinite(dbz):
mmperhr = pow(pow(10, dbz/10)/200, 0.625)
sum = sum + mmperhr * hours
XX.append(np.append(np.array(sum),tmp))
#XX.append(np.array([sum]))
#XX.append(tmp)
XX[2]
def splitTrainTest(X, y, split=0.2):
tmp1, tmp2 = [], []
ps = int(len(X) * (1-split))
index_shuf = range(len(X))
random.shuffle(index_shuf)
for i in index_shuf:
tmp1.append(X[i])
tmp2.append(y[i])
return tmp1[:ps], tmp2[:ps], tmp1[ps:], tmp2[ps:]
X_train,y_train, X_test, y_test = splitTrainTest(XX,y)
def manualScorer(estimator, X, y):
err = (estimator.predict(X_test)-y_test)**2
return -err.sum()/len(err)
from sklearn import svm
svr = svm.SVR(C=100000)
%%time
srv = svr.fit(X_train,y_train)
err = (svr.predict(X_train)-y_train)**2
err.sum()/len(err)
err = (svr.predict(X_test)-y_test)**2
err.sum()/len(err)
%%time
svr_score = cross_val_score(svr, XX, y, cv=5)
print("Score: %s\nMean: %.03f"%(svr_score,svr_score.mean()))
knn = KNeighborsRegressor(n_neighbors=6,weights='distance',algorithm='ball_tree')
#parameters = {'weights':('distance','uniform'),'algorithm':('auto', 'ball_tree', 'kd_tree', 'brute')}
parameters = {'n_neighbors':range(1,10,1)}
grid_knn = grid_search.GridSearchCV(knn, parameters,scoring=manualScorer)
%%time
grid_knn.fit(X_train,y_train)
print(grid_knn.grid_scores_)
print("Best: ",grid_knn.best_params_)
knn = grid_knn.best_estimator_
knn= knn.fit(X_train,y_train)
err = (knn.predict(X_train)-y_train)**2
err.sum()/len(err)
err = (knn.predict(X_test)-y_test)**2
err.sum()/len(err)
etreg = ExtraTreesRegressor(n_estimators=200, max_depth=None, min_samples_split=1, random_state=0)
parameters = {'n_estimators':range(100,200,20)}
grid_rf = grid_search.GridSearchCV(etreg, parameters,n_jobs=2,scoring=manualScorer)
%%time
grid_rf.fit(X_train,y_train)
print(grid_rf.grid_scores_)
print("Best: ",grid_rf.best_params_)
grid_rf.best_params_
es = etreg
#es = grid_rf.best_estimator_
%%time
es = es.fit(X_train,y_train)
err = (es.predict(X_train)-y_train)**2
err.sum()/len(err)
err = (es.predict(X_test)-y_test)**2
err.sum()/len(err)
gbr = GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=900,
subsample=1.0, min_samples_split=2, min_samples_leaf=1,
min_weight_fraction_leaf=0.0, max_depth=4, init=None,
random_state=None, max_features=None, alpha=0.5,
verbose=0, max_leaf_nodes=None, warm_start=False)
%%time
gbr = gbr.fit(X_train,y_train)
os.system('say "終わりだ"') #its over!
#parameters = {'max_depth':range(2,5,1),'alpha':[0.5,0.6,0.7,0.8,0.9]}
#parameters = {'subsample':[0.2,0.4,0.5,0.6,0.8,1]}
#parameters = {'subsample':[0.2,0.5,0.6,0.8,1],'n_estimators':[800,1000,1200]}
#parameters = {'max_depth':range(2,4,1)}
#parameters = {'n_estimators':[600,800,900]}
#parameters = {'loss':['ls', 'lad', 'huber', 'quantile'],'alpha':[0.3,0.5,0.8,0.9]}
grid_gbr = grid_search.GridSearchCV(gbr, parameters,n_jobs=2,scoring=manualScorer)
%%time
grid_gbr = grid_gbr.fit(X_train,y_train)
print(grid_gbr.grid_scores_)
print("Best: ",grid_gbr.best_params_)
err = (gbr.predict(X_train)-y_train)**2
print(err.sum()/len(err))
err = (gbr.predict(X_test)-y_test)**2
print(err.sum()/len(err))
err = (gbr.predict(X_train)-y_train)**2
print(err.sum()/len(err))
err = (gbr.predict(X_test)-y_test)**2
print(err.sum()/len(err))
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, RMSprop
in_dim = len(XX[0])
out_dim = 1
model = Sequential()
# Dense(128) is a fully-connected layer with 128 hidden units.
# In the first layer you must specify the expected input data shape:
# here, vectors with in_dim features (one per engineered column).
model.add(Dense(128, input_shape=(in_dim,)))
model.add(Activation('tanh'))
model.add(Dropout(0.5))
model.add(Dense(1, init='uniform'))
model.add(Activation('linear'))
#sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
#model.compile(loss='mean_squared_error', optimizer=sgd)
rms = RMSprop()
model.compile(loss='mean_squared_error', optimizer=rms)
#model.fit(X_train, y_train, nb_epoch=20, batch_size=16)
#score = model.evaluate(X_test, y_test, batch_size=16)
prep = []
for i in y_train:
prep.append(min(i,20))
prep=np.array(prep)
mi,ma = prep.min(),prep.max()
fy = (prep-mi) / (ma-mi)
#my = fy.max()
#fy = fy/fy.max()
model.fit(np.array(X_train), fy, batch_size=10, nb_epoch=10, validation_split=0.1)
pred = model.predict(np.array(X_test))*ma+mi
err = (pred-y_test)**2
err.sum()/len(err)
r = random.randrange(len(X_train))
print("(Train) Prediction %0.4f, True: %0.4f"%(model.predict(np.array([X_train[r]]))[0][0]*ma+mi,y_train[r]))
r = random.randrange(len(X_test))
print("(Test) Prediction %0.4f, True: %0.4f"%(model.predict(np.array([X_test[r]]))[0][0]*ma+mi,y_test[r]))
def marshall_palmer(ref, minutes_past):
#print("Estimating rainfall from {0} observations".format(len(minutes_past)))
# how long is each observation valid?
valid_time = np.zeros_like(minutes_past)
valid_time[0] = minutes_past.iloc[0]
for n in xrange(1, len(minutes_past)):
valid_time[n] = minutes_past.iloc[n] - minutes_past.iloc[n-1]
valid_time[-1] = valid_time[-1] + 60 - np.sum(valid_time)
valid_time = valid_time / 60.0
# sum up rainrate * validtime
sum = 0
for dbz, hours in zip(ref, valid_time):
# See: https://en.wikipedia.org/wiki/DBZ_(meteorology)
if np.isfinite(dbz):
mmperhr = pow(pow(10, dbz/10)/200, 0.625)
sum = sum + mmperhr * hours
return sum
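# Quick sanity check of the Z-R relation used above (Z = 200 * R**1.6): a 40 dBZ echo has
# Z = 10**(40/10) = 10000, so R = (10000 / 200)**0.625, roughly 11.5 mm/h of rain.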
def simplesum(ref,hour):
hour.sum()
# each unique Id is an hour of data at some gauge
def myfunc(hour):
#rowid = hour['Id'].iloc[0]
# sort hour by minutes_past
hour = hour.sort('minutes_past', ascending=True)
est = marshall_palmer(hour['Ref'], hour['minutes_past'])
return est
info = raw.groupby(raw.index)
estimates = raw.groupby(raw.index).apply(myfunc)
estimates.head(20)
%%time
etreg.fit(X_train,y_train)
%%time
et_score = cross_val_score(etreg, XX, y, cv=5)
print("Score: %s\tMean: %.03f"%(et_score,et_score.mean()))
%%time
et_score = cross_val_score(etreg, XX, y, cv=5)
print("Score: %s\tMean: %.03f"%(et_score,et_score.mean()))
err = (etreg.predict(X_test)-y_test)**2
err.sum()/len(err)
err = (etreg.predict(X_test)-y_test)**2
err.sum()/len(err)
r = random.randrange(len(X_train))
print(r)
print(etreg.predict(X_train[r]))
print(y_train[r])
r = random.randrange(len(X_test))
print(r)
print(etreg.predict(X_test[r]))
print(y_test[r])
%%time
filename = "data/reduced_test_5000.csv"
#filename = "data/test.csv"
test = pd.read_csv(filename)
test = test.set_index('Id')
features_columns = list([u'Ref', u'Ref_5x5_10th',
u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',
u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',
u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',
u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',
u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',
u'Kdp_5x5_50th', u'Kdp_5x5_90th'])
def getX(raw):
selected_columns = list([ u'minutes_past',u'radardist_km', u'Ref', u'Ref_5x5_10th',
u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',
u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',
u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',
u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',
u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',
u'Kdp_5x5_50th', u'Kdp_5x5_90th'])
data = raw[selected_columns]
docX= []
for i in data.index.unique():
if isinstance(data.loc[i],pd.core.series.Series):
m = [data.loc[i].as_matrix()]
docX.append(m)
else:
m = data.loc[i].as_matrix()
docX.append(m)
X = np.array(docX)
return X
# We select all features except for the minutes past,
# because we ignore the time repartition of the sequence for now
features_columns = list([u'Ref', u'Ref_5x5_10th',
u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',
u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',
u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',
u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',
u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',
u'Kdp_5x5_50th', u'Kdp_5x5_90th'])
def getXy(raw):
selected_columns = list([ u'minutes_past',u'radardist_km', u'Ref', u'Ref_5x5_10th',
u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',
u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',
u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',
u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',
u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',
u'Kdp_5x5_50th', u'Kdp_5x5_90th'])
data = raw[selected_columns]
docX, docY = [], []
for i in data.index.unique():
if isinstance(data.loc[i],pd.core.series.Series):
m = [data.loc[i].as_matrix()]
docX.append(m)
docY.append(float(raw.loc[i]["Expected"]))
else:
m = data.loc[i].as_matrix()
docX.append(m)
docY.append(float(raw.loc[i][:1]["Expected"]))
X , y = np.array(docX) , np.array(docY)
return X,y
X=getX(test)
tmp = []
for i in X:
tmp.append(len(i))
tmp = np.array(tmp)
sns.countplot(tmp,order=range(tmp.min(),tmp.max()+1))
plt.title("Number of ID per number of observations\n(On test dataset)")
plt.plot()
testFull = test.dropna()
%%time
X=getX(testFull) # 1min
XX = [np.array(t).mean(0) for t in X] # 10s
%%time
XX=[]
for t in X:
#print(idx)
tmp = np.append(np.nanmean(np.array(t),0),(np.array(t)[1:] - np.array(t)[:-1]).sum(0) )
tmp = np.append(tmp,np.percentile(t,10,axis=0))
tmp = np.append(tmp,np.percentile(t,50,axis=0))
tmp = np.append(tmp,np.percentile(t,90,axis=0))
# keep the per-gauge observation matrix under its own name so we don't clobber
# the `test` DataFrame that is reused further down
obs = t
try:
taa = obs[:, 0]
except TypeError:
taa = [obs[0][0]]
valid_time = np.zeros_like(taa)
valid_time[0] = taa[0]
for n in xrange(1,len(taa)):
valid_time[n] = taa[n] - taa[n-1]
valid_time[-1] = valid_time[-1] + 60 - np.sum(valid_time)
valid_time = valid_time / 60.0
sum=0
try:
column_ref = obs[:, 2]
except TypeError:
column_ref = [obs[0][2]]
for dbz, hours in zip(column_ref, valid_time):
# See: https://en.wikipedia.org/wiki/DBZ_(meteorology)
if np.isfinite(dbz):
mmperhr = pow(pow(10, dbz/10)/200, 0.625)
sum = sum + mmperhr * hours
XX.append(np.append(np.array(sum),tmp))
#XX.append(np.array([sum]))
#XX.append(tmp)
pd.DataFrame(gbr.predict(XX)).describe()
predFull = zip(testFull.index.unique(),etreg.predict(XX))
testNan = test.drop(test[features_columns].dropna(how='all').index)
tmp = np.empty(len(testNan))
tmp.fill(0.445000) # 50th percentile of full Nan dataset
predNan = zip(testNan.index.unique(),tmp)
testLeft = test.drop(testNan.index.unique()).drop(testFull.index.unique())
tmp = np.empty(len(testLeft))
tmp.fill(1.27) # 50th percentile of full Nan dataset
predLeft = zip(testLeft.index.unique(),tmp)
len(testFull.index.unique())
len(testNan.index.unique())
len(testLeft.index.unique())
pred = predFull + predNan + predLeft
pred.sort(key=lambda x: x[0], reverse=False)
submission = pd.DataFrame(pred)
submission.columns = ["Id","Expected"]
submission.head()
submission.to_csv("first_submit.csv",index=False)
filename = "data/sample_solution.csv"
sol = pd.read_csv(filename)
ss = np.array(sol)
%%time
for a,b in predFull:
ss[a-1][1]=b
ss
sub = pd.DataFrame(ss)
sub.columns = ["Id","Expected"]
sub.Id = sub.Id.astype(int)
sub.head()
sub.to_csv("submit_2.csv",index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's take a look at a few of the delta microstructures by importing draw_microstructures from pymks.tools.
Step2: Using delta microstructures for the calibration of the first-order influence coefficients is essentially the same as using a unit impulse response to find the kernel of a system in signal processing. Any given delta microstructure is composed of only two phases with the center cell having an alternative phase from the remainder of the domain. The number of delta microstructures that are needed to calibrate the first-order coefficients is $N(N-1)$ where $N$ is the number of phases, therefore in this example we need 6 delta microstructures.
Step3: Let's take a look at one of the delta microstructures and the $\varepsilon_{xx}$ strain field.
Step4: Because slice(None) (the default slice operator in Python, equivalent to array[:])
Step5: Now, pass the delta microstructures and their strain fields into the fit method to calibrate the first-order influence coefficients.
Step6: That's it, the influence coefficients have been calibrated. Let's take a look at them.
Step7: The influence coefficients for $l=0$ and $l=1$ have a Gaussian-like shape, while the influence coefficients for $l=2$ are constant-valued. The constant-valued influence coefficients may seem superfluous, but they are equally important. They are equivalent to the constant term in multiple linear regression with categorical variables.
Step8: Note that the calibrated influence coefficients can only be used to reproduce the simulation with the same boundary conditions that they were calibrated with.
Step9: Finally let's compare the results from finite element simulation and the MKS model.
Step10: Let's plot the difference between the two strain fields.
Step11: The MKS model is able to capture the strain field for the random microstructure after being calibrated with delta microstructures.
Step12: The influence coefficients that have already been calibrated on $n$ by $n$ delta microstructures need to be resized to match the shape of the new larger $m$ by $m$ microstructure that we want to compute the strain field for. This can be done by passing the shape of the new larger microstructure into the resize_coeff method.
Step13: Let's now take a look at the resized influence coefficients.
Step14: Because the coefficients have been resized, they will no longer work on the original $n$ by $n$ microstructures they were calibrated on, but they can now be used on the $m$ by $m$ microstructures. Just like before, pass the microstructure as the argument of the predict method to get the strain field.
Step15: Again, let's plot the difference between the two strain fields.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
n = 21
n_phases = 3
from pymks.tools import draw_microstructures
from pymks.datasets import make_delta_microstructures
X_delta = make_delta_microstructures(n_phases=n_phases, size=(n, n))
draw_microstructures(X_delta[::2])
from pymks.datasets import make_elastic_FE_strain_delta
from pymks.tools import draw_microstructure_strain
elastic_modulus = (80, 100, 120)
poissons_ratio = (0.3, 0.3, 0.3)
macro_strain = 0.02
size = (n, n)
X_delta, strains_delta = make_elastic_FE_strain_delta(elastic_modulus=elastic_modulus,
poissons_ratio=poissons_ratio,
size=size, macro_strain=macro_strain)
draw_microstructure_strain(X_delta[0], strains_delta[0])
from pymks import MKSLocalizationModel
from pymks import PrimitiveBasis
prim_basis =PrimitiveBasis(n_states=3, domain=[0, 2])
model = MKSLocalizationModel(basis=prim_basis)
model.fit(X_delta, strains_delta)
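# Quick sanity check (a sketch, not part of the original notebook): one 2D
# kernel of influence coefficients is calibrated per local state, so for this
# 21 x 21 domain with 3 phases the shape is expected to be (21, 21, 3).
print(model.coeff.shape)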
from pymks.tools import draw_coeff
draw_coeff(model.coeff)
from pymks.datasets import make_elastic_FE_strain_random
np.random.seed(101)
X, strain = make_elastic_FE_strain_random(n_samples=1, elastic_modulus=elastic_modulus,
poissons_ratio=poissons_ratio, size=size,
macro_strain=macro_strain)
draw_microstructure_strain(X[0] , strain[0])
strain_pred = model.predict(X)
from pymks.tools import draw_strains_compare
draw_strains_compare(strain[0], strain_pred[0])
from pymks.tools import draw_differences
draw_differences([strain[0] - strain_pred[0]], ['Finite Element - MKS'])
m = 3 * n
size = (m, m)
print(size)
X, strain = make_elastic_FE_strain_random(n_samples=1, elastic_modulus=elastic_modulus,
poissons_ratio=poissons_ratio, size=size,
macro_strain=macro_strain)
draw_microstructure_strain(X[0] , strain[0])
model.resize_coeff(X[0].shape)
draw_coeff(model.coeff)
strain_pred = model.predict(X)
draw_strains_compare(strain[0], strain_pred[0])
draw_differences([strain[0] - strain_pred[0]], ['Finite Element - MKS'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
data = {'spike-2': [1,2,3], 'hey spke': [4,5,6], 'spiked-in': [7,8,9], 'no': [10,11,12]}
df = pd.DataFrame(data)
s = 'spike'
def g(df, s):
spike_cols = [s for col in df.columns if s in col and s != col]
for i in range(len(spike_cols)):
spike_cols[i] = spike_cols[i]+str(i+1)
result = df[[col for col in df.columns if s in col and col != s]]
result.columns = spike_cols
return result
result = g(df.copy(),s)
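# For the toy frame above only 'spike-2' and 'spiked-in' qualify ('hey spke'
# does not contain the full word, and a column named exactly 'spike' would be
# excluded), so the result is expected to look roughly like:
#    spike1  spike2
# 0       1       7
# 1       2       8
# 2       3       9
print(result)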
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Annealing
Step2: Let's start with a simple linear schedule going from start to end. It returns a function that takes a pos argument (going from 0 to 1) such that this function goes from start (at pos=0) to end (at pos=1) in a linear fashion.
Step3: We can refactor this with a decorator.
Step4: And here are other scheduler functions
Step5: Jump_to lesson 9 video
Step6: In practice, we'll often want to combine different schedulers, the following function does that
Step7: Here is an example
Step8: We can use it for training quite easily...
Step9: ... then check with our recorder if the learning rate followed the right schedule.
Step10: Export
|
<ASSISTANT_TASK:>
Python Code:
x_train,y_train,x_valid,y_valid = get_data()
train_ds,valid_ds = Dataset(x_train, y_train),Dataset(x_valid, y_valid)
nh,bs = 50,512
c = y_train.max().item()+1
loss_func = F.cross_entropy
data = DataBunch(*get_dls(train_ds, valid_ds, bs), c)
#export
def create_learner(model_func, loss_func, data):
return Learner(*model_func(data), loss_func, data)
learn = create_learner(get_model, loss_func, data)
run = Runner([AvgStatsCallback([accuracy])])
run.fit(3, learn)
learn = create_learner(partial(get_model, lr=0.3), loss_func, data)
run = Runner([AvgStatsCallback([accuracy])])
run.fit(3, learn)
#export
def get_model_func(lr=0.5): return partial(get_model, lr=lr)
#export
class Recorder(Callback):
def begin_fit(self): self.lrs,self.losses = [],[]
def after_batch(self):
if not self.in_train: return
self.lrs.append(self.opt.param_groups[-1]['lr'])
self.losses.append(self.loss.detach().cpu())
def plot_lr (self): plt.plot(self.lrs)
def plot_loss(self): plt.plot(self.losses)
class ParamScheduler(Callback):
_order=1
def __init__(self, pname, sched_func): self.pname,self.sched_func = pname,sched_func
def set_param(self):
for pg in self.opt.param_groups:
pg[self.pname] = self.sched_func(self.n_epochs/self.epochs)
def begin_batch(self):
if self.in_train: self.set_param()
def sched_lin(start, end):
def _inner(start, end, pos): return start + pos*(end-start)
return partial(_inner, start, end)
#export
def annealer(f):
def _inner(start, end): return partial(f, start, end)
return _inner
@annealer
def sched_lin(start, end, pos): return start + pos*(end-start)
# shift-tab works too, in Jupyter!
# sched_lin()
f = sched_lin(1,2)
f(0.3)
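# Sanity check (a sketch): a linear schedule from 1 to 2 evaluated at pos=0.3
# should give 1 + 0.3*(2-1) = 1.3, matching the value displayed above.
assert abs(f(0.3) - 1.3) < 1e-6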
#export
@annealer
def sched_cos(start, end, pos): return start + (1 + math.cos(math.pi*(1-pos))) * (end-start) / 2
@annealer
def sched_no(start, end, pos): return start
@annealer
def sched_exp(start, end, pos): return start * (end/start) ** pos
def cos_1cycle_anneal(start, high, end):
return [sched_cos(start, high), sched_cos(high, end)]
#This monkey-patch is there to be able to plot tensors
torch.Tensor.ndim = property(lambda x: len(x.shape))
annealings = "NO LINEAR COS EXP".split()
a = torch.arange(0, 100)
p = torch.linspace(0.01,1,100)
fns = [sched_no, sched_lin, sched_cos, sched_exp]
for fn, t in zip(fns, annealings):
f = fn(2, 1e-2)
plt.plot(a, [f(o) for o in p], label=t)
plt.legend();
#export
def combine_scheds(pcts, scheds):
assert sum(pcts) == 1.
pcts = tensor([0] + listify(pcts))
assert torch.all(pcts >= 0)
pcts = torch.cumsum(pcts, 0)
def _inner(pos):
idx = (pos >= pcts).nonzero().max()
if idx == 2: idx = 1
actual_pos = (pos-pcts[idx]) / (pcts[idx+1]-pcts[idx])
return scheds[idx](actual_pos)
return _inner
sched = combine_scheds([0.3, 0.7], [sched_cos(0.3, 0.6), sched_cos(0.6, 0.2)])
plt.plot(a, [sched(o) for o in p])
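# cos_1cycle_anneal above builds the two cosine phases of a 1cycle-style
# schedule but is not used elsewhere in this notebook; a sketch of wiring it
# through combine_scheds (30% of training warming 0.3 -> 0.6, then 0.6 -> 0.2):
sched_1cycle = combine_scheds([0.3, 0.7], cos_1cycle_anneal(0.3, 0.6, 0.2))
plt.plot(a, [sched_1cycle(o) for o in p])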
cbfs = [Recorder,
partial(AvgStatsCallback,accuracy),
partial(ParamScheduler, 'lr', sched)]
learn = create_learner(get_model_func(0.3), loss_func, data)
run = Runner(cb_funcs=cbfs)
run.fit(3, learn)
run.recorder.plot_lr()
run.recorder.plot_loss()
!./notebook2script.py 05_anneal.ipynb
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Create a self-signed certificate
Step2: Not recommended
Step3: Running the secured remote notebook
|
<ASSISTANT_TASK:>
Python Code:
!ssh thauser@comet.sdsc.edu 'cd .ipython/profile_nbserver; ls -al'
from IPython.lib import passwd
passwd('test password')
!ssh thauser@gordon.sdsc.xsede.org 'head -n 12 .ipython/profile_nbserver/ipython_notebook_config.py'
import sys
import time
import saga
# Adapted from the saga example
# Your ssh identity on the remote machine.
ctx = saga.Context("ssh")
ctx.user_id = 'thauser'
session = saga.Session()
session.add_context(ctx)
# Create a job service object that represent a remote pbs cluster.
js = saga.job.Service("slurm+ssh://comet.sdsc.edu", session=session)
# Set the parameters for this example
local_port=9988
remote_port=11111
username='thauser'
hostname='comet.sdsc.edu'
# Next, we describe the job we want to run. A complete set of job
# description attributes can be found in the API documentation.
jd = saga.job.Description()
jd.wall_time_limit = 10 # minutes
jd.executable = "ipython notebook --profile=nbserver"
jd.queue = "compute"
jd.working_directory = "A"
jd.output = "ipythonjob.out"
jd.error = "ipythonjob.err"
touchjob = js.create_job(jd)
# Check our job's id and state
print "Job ID : %s" % (touchjob.id)
print "Job State : %s" % (touchjob.state)
# Now we can start our job.
print "\n...starting job...\n"
touchjob.run()
print "Job ID : %s" % (touchjob.id)
print "Job State : %s" % (touchjob.state)
# List all jobs that are known by the adaptor.
# This should show our job as well.
print "\nListing active jobs: "
for job in js.list():
print " * %s" % job
# Now we disconnect and reconnect to our job by using the get_job()
# method and our job's id. While this doesn't make a lot of sense
# here, disconnect / reconnect can become very important for
# long-running job.
touchjob_clone = js.get_job(touchjob.id)
print touchjob_clone.state
while touchjob_clone.state == 'Pending':
print "...Waiting for Job to start...."
time.sleep(30)
nodename = touchjob_clone.execution_hosts[0]
nodename = nodename[:-2]
touchjob_clone.wait()
print "Job State : %s" % (touchjob_clone.state)
print "Exitcode : %s" % (touchjob_clone.exit_code)
js.close()
touchjob.id
js.get_job(touchjob.id)
touchjob_clone = js.get_job(touchjob.id)
print touchjob_clone.state
touchjob_clone.execution_hosts
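# The local_port / remote_port settings above are never used in these cells;
# they are presumably meant for an SSH tunnel from this machine, through the
# login node, to the notebook server on the compute node. A minimal sketch
# (assumes key-based SSH to the login node; adjust to your site's policy):
import subprocess
tunnel = subprocess.Popen(['ssh', '-N', '-L',
                           '{0}:{1}:{2}'.format(local_port, nodename, remote_port),
                           '{0}@{1}'.format(username, hostname)])
# The notebook should then be reachable at https://localhost:9988
# until tunnel.terminate() is called.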
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualisation with NetworkX
Step2: get_interaction_df() is defined in bookworm/build_network.py, and works by searching through the provided cooccurence matrix for interactions with strength above a specified threshold.
Step3: And, just as easily, visualise it with draw_spring(), where spring is a reference to the idea that edges in the network are treated like physical springs, with elasticity/compressibility related to the weights of the connections
Step4: Very nice... ish. There's more that could be done to clean up the visualisation and make it pretty, but it's fine for now.
Step5: We can ask NetworkX for cliques in the graph, which are especially relevant to social networks like this. enumerate_all_cliques() gives us a massive list of all the cliques it finds - we'll just return the last one because it's most illustrative of what a clique is in this context...
Step6: It's isolated the people who appear in the book at Number 4, Privet Drive. Fun!
Step7: Similarly, we can use NetworkX's implementation of classic pathfinding algoritms like Dijkstra's algorithm and A* to return paths between characters. For example, if Hedwig was interested in getting to know Nicolas Flamel, and wanted to do so with as few new introductions as possible along the way, these are the shoulders she would need to tap on for introductions
Step8: Pathfinding is clearly an application that is more suited to transport networks etc, but it's still interesting to see it applied here...
Step9: We can write that dictionary out to a .json file in the project's d3 directory using the json package
Step10: Jupyter notebooks allow us to run commands in other languages, so we'll use bash to do a few things from here on. For example, we can list the files in the d3 directory
Step11: or print out one of those files
Step12: The next cell can be used to set up a locally hosted version of that d3.js script.
|
<ASSISTANT_TASK:>
Python Code:
from bookworm import *
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
plt.rcParams['figure.figsize'] = (12,9)
import pandas as pd
import numpy as np
book = load_book('data/raw/hp_philosophers_stone.txt')
characters = extract_character_names(book)
sequences = get_sentence_sequences(book)
df = find_connections(sequences, characters)
cooccurence = calculate_cooccurence(df)
import networkx as nx
interaction_df = get_interaction_df(cooccurence, threshold=2)
interaction_df.sample(5)
G = nx.from_pandas_dataframe(interaction_df,
source='source',
target='target')
nx.draw_spring(G, with_labels=True)
pd.Series(nx.pagerank(G)).sort_values(ascending=False)[:5]
a, b = nx.hits(G)
pd.Series(a).sort_values(ascending=False)[:5]
list(nx.enumerate_all_cliques(G))[-1]
comms = nx.communicability(G)
print(comms["('Vernon ',)"]["('Dumbledore ',)"])
print(comms["('Harry ',)"]["('Hermione ',)"])
nx.dijkstra_path(G,
source="('Hedwig ',)",
target="('Flamel ',)")
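# The description also mentions A*; on this unweighted graph astar_path with the
# default (null) heuristic behaves essentially like Dijkstra, so the result should
# match the path above (a sketch, not part of the original analysis):
nx.astar_path(G, source="('Hedwig ',)", target="('Flamel ',)")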
nodes = [{"id": str(id), "group": 1} for id in set(interaction_df['source'])]
links = interaction_df.to_dict(orient='records')
d3_dict = {'nodes': nodes, 'links': links}
import json
with open('bookworm/d3/bookworm.json', 'w') as fp:
json.dump(d3_dict, fp)
%%bash
ls bookworm/d3/
%%bash
cat bookworm/d3/index.html
%%bash
cd bookworm/d3/
python -m http.server
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hamilton (1989) switching model of GNP
Step2: We plot the filtered and smoothed probabilities of a recession. Filtered refers to an estimate of the probability at time $t$ based on data up to and including time $t$ (but excluding time $t+1, ..., T$). Smoothed refers to an estimate of the probability at time $t$ using all the data in the sample.
Step3: From the estimated transition matrix we can calculate the expected duration of a recession versus an expansion.
Step4: In this case, it is expected that a recession will last about one year (4 quarters) and an expansion about two and a half years.
Step5: Below we plot the probabilities of being in each of the regimes; only in a few periods is a high-variance regime probable.
Step6: Filardo (1994) Time-Varying Transition Probabilities
Step7: The time-varying transition probabilities are specified by the exog_tvtp parameter.
Step8: Below we plot the smoothed probability of the economy operating in a low-production state, and again include the NBER recessions for comparison.
Step9: Using the time-varying transition probabilities, we can see how the expected duration of a low-production state changes over time
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import requests
from io import BytesIO
# NBER recessions
from pandas_datareader.data import DataReader
from datetime import datetime
usrec = DataReader('USREC', 'fred', start=datetime(1947, 1, 1), end=datetime(2013, 4, 1))
# Get the RGNP data to replicate Hamilton
from statsmodels.tsa.regime_switching.tests.test_markov_autoregression import rgnp
dta_hamilton = pd.Series(rgnp, index=pd.date_range('1951-04-01', '1984-10-01', freq='QS'))
# Plot the data
dta_hamilton.plot(title='Growth rate of Real GNP', figsize=(12,3))
# Fit the model
mod_hamilton = sm.tsa.MarkovAutoregression(dta_hamilton, k_regimes=2, order=4, switching_ar=False)
res_hamilton = mod_hamilton.fit()
res_hamilton.summary()
fig, axes = plt.subplots(2, figsize=(7,7))
ax = axes[0]
ax.plot(res_hamilton.filtered_marginal_probabilities[0])
ax.fill_between(usrec.index, 0, 1, where=usrec['USREC'].values, color='k', alpha=0.1)
ax.set_xlim(dta_hamilton.index[4], dta_hamilton.index[-1])
ax.set(title='Filtered probability of recession')
ax = axes[1]
ax.plot(res_hamilton.smoothed_marginal_probabilities[0])
ax.fill_between(usrec.index, 0, 1, where=usrec['USREC'].values, color='k', alpha=0.1)
ax.set_xlim(dta_hamilton.index[4], dta_hamilton.index[-1])
ax.set(title='Smoothed probability of recession')
fig.tight_layout()
print(res_hamilton.expected_durations)
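# The expected durations above follow directly from the transition matrix: for a
# Markov chain the expected duration of regime i is 1 / (1 - p[i->i]). A sketch of
# the arithmetic with placeholder diagonal probabilities (read the fitted p[0->0]
# and p[1->1] off res_hamilton.summary() instead):
p00, p11 = 0.75, 0.90  # placeholders, not the fitted values
print(1 / (1 - np.array([p00, p11])))  # -> [ 4. 10.], i.e. ~4 and ~10 quarters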
# Get the dataset
ew_excs = requests.get('http://econ.korea.ac.kr/~cjkim/MARKOV/data/ew_excs.prn').content
raw = pd.read_table(BytesIO(ew_excs), header=None, skipfooter=1, engine='python')
raw.index = pd.date_range('1926-01-01', '1995-12-01', freq='MS')
dta_kns = raw.ix[:'1986'] - raw.ix[:'1986'].mean()
# Plot the dataset
dta_kns[0].plot(title='Excess returns', figsize=(12, 3))
# Fit the model
mod_kns = sm.tsa.MarkovRegression(dta_kns, k_regimes=3, trend='nc', switching_variance=True)
res_kns = mod_kns.fit()
res_kns.summary()
fig, axes = plt.subplots(3, figsize=(10,7))
ax = axes[0]
ax.plot(res_kns.smoothed_marginal_probabilities[0])
ax.set(title='Smoothed probability of a low-variance regime for stock returns')
ax = axes[1]
ax.plot(res_kns.smoothed_marginal_probabilities[1])
ax.set(title='Smoothed probability of a medium-variance regime for stock returns')
ax = axes[2]
ax.plot(res_kns.smoothed_marginal_probabilities[2])
ax.set(title='Smoothed probability of a high-variance regime for stock returns')
fig.tight_layout()
# Get the dataset
filardo = requests.get('http://econ.korea.ac.kr/~cjkim/MARKOV/data/filardo.prn').content
dta_filardo = pd.read_table(BytesIO(filardo), sep=' +', header=None, skipfooter=1, engine='python')
dta_filardo.columns = ['month', 'ip', 'leading']
dta_filardo.index = pd.date_range('1948-01-01', '1991-04-01', freq='MS')
dta_filardo['dlip'] = np.log(dta_filardo['ip']).diff()*100
# Deflated pre-1960 observations by ratio of std. devs.
# See hmt_tvp.opt or Filardo (1994) p. 302
std_ratio = dta_filardo['dlip']['1960-01-01':].std() / dta_filardo['dlip'][:'1959-12-01'].std()
dta_filardo['dlip'][:'1959-12-01'] = dta_filardo['dlip'][:'1959-12-01'] * std_ratio
dta_filardo['dlleading'] = np.log(dta_filardo['leading']).diff()*100
dta_filardo['dmdlleading'] = dta_filardo['dlleading'] - dta_filardo['dlleading'].mean()
# Plot the data
dta_filardo['dlip'].plot(title='Standardized growth rate of industrial production', figsize=(13,3))
plt.figure()
dta_filardo['dmdlleading'].plot(title='Leading indicator', figsize=(13,3));
mod_filardo = sm.tsa.MarkovAutoregression(
dta_filardo.ix[2:, 'dlip'], k_regimes=2, order=4, switching_ar=False,
exog_tvtp=sm.add_constant(dta_filardo.ix[1:-1, 'dmdlleading']))
np.random.seed(12345)
res_filardo = mod_filardo.fit(search_reps=20)
res_filardo.summary()
fig, ax = plt.subplots(figsize=(12,3))
ax.plot(res_filardo.smoothed_marginal_probabilities[0])
ax.fill_between(usrec.index, 0, 1, where=usrec['USREC'].values, color='gray', alpha=0.2)
ax.set_xlim(dta_filardo.index[6], dta_filardo.index[-1])
ax.set(title='Smoothed probability of a low-production state');
res_filardo.expected_durations[0].plot(
title='Expected duration of a low-production state', figsize=(12,3));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Imports
Step3: tf.data.Dataset
Step4: Let's have a look at the data
Step5: Keras model
Step6: Train and validate the model
Step7: Visualize predictions
|
<ASSISTANT_TASK:>
Python Code:
BATCH_SIZE = 128
EPOCHS = 10
training_images_file = 'gs://mnist-public/train-images-idx3-ubyte'
training_labels_file = 'gs://mnist-public/train-labels-idx1-ubyte'
validation_images_file = 'gs://mnist-public/t10k-images-idx3-ubyte'
validation_labels_file = 'gs://mnist-public/t10k-labels-idx1-ubyte'
import os, re, math, json, shutil, pprint
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import IPython.display as display
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
print("Tensorflow version " + tf.__version__)
#@title visualization utilities [RUN ME]
# This cell contains helper functions used for visualization
# and downloads only. You can skip reading it. There is very
# little useful Keras/Tensorflow code here.
# Matplotlib config
plt.ioff()
plt.rc('image', cmap='gray_r')
plt.rc('grid', linewidth=1)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='a8151a')
plt.rc('figure', facecolor='F0F0F0', figsize=(16,9))
# Matplotlib fonts
MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")
# pull a batch from the datasets. This code is not very nice, it gets much better in eager mode (TODO)
def dataset_to_numpy_util(training_dataset, validation_dataset, N):
# get one batch from each: 10000 validation digits, N training digits
batch_train_ds = training_dataset.unbatch().batch(N)
# eager execution: loop through datasets normally
if tf.executing_eagerly():
for validation_digits, validation_labels in validation_dataset:
validation_digits = validation_digits.numpy()
validation_labels = validation_labels.numpy()
break
for training_digits, training_labels in batch_train_ds:
training_digits = training_digits.numpy()
training_labels = training_labels.numpy()
break
else:
v_images, v_labels = validation_dataset.make_one_shot_iterator().get_next()
t_images, t_labels = batch_train_ds.make_one_shot_iterator().get_next()
# Run once, get one batch. Session.run returns numpy results
with tf.Session() as ses:
(validation_digits, validation_labels,
training_digits, training_labels) = ses.run([v_images, v_labels, t_images, t_labels])
# these were one-hot encoded in the dataset
validation_labels = np.argmax(validation_labels, axis=1)
training_labels = np.argmax(training_labels, axis=1)
return (training_digits, training_labels,
validation_digits, validation_labels)
# create digits from local fonts for testing
def create_digits_from_local_fonts(n):
font_labels = []
img = PIL.Image.new('LA', (28*n, 28), color = (0,255)) # format 'LA': black in channel 0, alpha in channel 1
font1 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'DejaVuSansMono-Oblique.ttf'), 25)
font2 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'STIXGeneral.ttf'), 25)
d = PIL.ImageDraw.Draw(img)
for i in range(n):
font_labels.append(i%10)
d.text((7+i*28,0 if i<10 else -4), str(i%10), fill=(255,255), font=font1 if i<10 else font2)
font_digits = np.array(img.getdata(), np.float32)[:,0] / 255.0 # black in channel 0, alpha in channel 1 (discarded)
font_digits = np.reshape(np.stack(np.split(np.reshape(font_digits, [28, 28*n]), n, axis=1), axis=0), [n, 28*28])
return font_digits, font_labels
# utility to display a row of digits with their predictions
def display_digits(digits, predictions, labels, title, n):
fig = plt.figure(figsize=(13,3))
digits = np.reshape(digits, [n, 28, 28])
digits = np.swapaxes(digits, 0, 1)
digits = np.reshape(digits, [28, 28*n])
plt.yticks([])
plt.xticks([28*x+14 for x in range(n)], predictions)
plt.grid(b=None)
for i,t in enumerate(plt.gca().xaxis.get_ticklabels()):
if predictions[i] != labels[i]: t.set_color('red') # bad predictions in red
plt.imshow(digits)
plt.grid(None)
plt.title(title)
display.display(fig)
# utility to display multiple rows of digits, sorted by unrecognized/recognized status
def display_top_unrecognized(digits, predictions, labels, n, lines):
idx = np.argsort(predictions==labels) # sort order: unrecognized first
for i in range(lines):
display_digits(digits[idx][i*n:(i+1)*n], predictions[idx][i*n:(i+1)*n], labels[idx][i*n:(i+1)*n],
"{} sample validation digits out of {} with bad predictions in red and sorted first".format(n*lines, len(digits)) if i==0 else "", n)
def plot_learning_rate(lr_func, epochs):
xx = np.arange(epochs+1, dtype=np.float)
  y = [lr_func(x) for x in xx]
fig, ax = plt.subplots(figsize=(9, 6))
ax.set_xlabel('epochs')
ax.set_title('Learning rate\ndecays from {:0.3g} to {:0.3g}'.format(y[0], y[-2]))
ax.minorticks_on()
ax.grid(True, which='major', axis='both', linestyle='-', linewidth=1)
ax.grid(True, which='minor', axis='both', linestyle=':', linewidth=0.5)
ax.step(xx,y, linewidth=3, where='post')
display.display(fig)
class PlotTraining(tf.keras.callbacks.Callback):
def __init__(self, sample_rate=1, zoom=1):
self.sample_rate = sample_rate
self.step = 0
self.zoom = zoom
self.steps_per_epoch = 60000//BATCH_SIZE
def on_train_begin(self, logs={}):
self.batch_history = {}
self.batch_step = []
self.epoch_history = {}
self.epoch_step = []
self.fig, self.axes = plt.subplots(1, 2, figsize=(16, 7))
plt.ioff()
def on_batch_end(self, batch, logs={}):
if (batch % self.sample_rate) == 0:
self.batch_step.append(self.step)
for k,v in logs.items():
# do not log "batch" and "size" metrics that do not change
# do not log training accuracy "acc"
if k=='batch' or k=='size':# or k=='acc':
continue
self.batch_history.setdefault(k, []).append(v)
self.step += 1
def on_epoch_end(self, epoch, logs={}):
plt.close(self.fig)
self.axes[0].cla()
self.axes[1].cla()
self.axes[0].set_ylim(0, 1.2/self.zoom)
self.axes[1].set_ylim(1-1/self.zoom/2, 1+0.1/self.zoom/2)
self.epoch_step.append(self.step)
for k,v in logs.items():
# only log validation metrics
if not k.startswith('val_'):
continue
self.epoch_history.setdefault(k, []).append(v)
display.clear_output(wait=True)
for k,v in self.batch_history.items():
self.axes[0 if k.endswith('loss') else 1].plot(np.array(self.batch_step) / self.steps_per_epoch, v, label=k)
for k,v in self.epoch_history.items():
self.axes[0 if k.endswith('loss') else 1].plot(np.array(self.epoch_step) / self.steps_per_epoch, v, label=k, linewidth=3)
self.axes[0].legend()
self.axes[1].legend()
self.axes[0].set_xlabel('epochs')
self.axes[1].set_xlabel('epochs')
self.axes[0].minorticks_on()
self.axes[0].grid(True, which='major', axis='both', linestyle='-', linewidth=1)
self.axes[0].grid(True, which='minor', axis='both', linestyle=':', linewidth=0.5)
self.axes[1].minorticks_on()
self.axes[1].grid(True, which='major', axis='both', linestyle='-', linewidth=1)
self.axes[1].grid(True, which='minor', axis='both', linestyle=':', linewidth=0.5)
display.display(self.fig)
AUTO = tf.data.experimental.AUTOTUNE
def read_label(tf_bytestring):
label = tf.io.decode_raw(tf_bytestring, tf.uint8)
label = tf.reshape(label, [])
label = tf.one_hot(label, 10)
return label
def read_image(tf_bytestring):
image = tf.io.decode_raw(tf_bytestring, tf.uint8)
image = tf.cast(image, tf.float32)/256.0
image = tf.reshape(image, [28*28])
return image
def load_dataset(image_file, label_file):
imagedataset = tf.data.FixedLengthRecordDataset(image_file, 28*28, header_bytes=16)
imagedataset = imagedataset.map(read_image, num_parallel_calls=16)
labelsdataset = tf.data.FixedLengthRecordDataset(label_file, 1, header_bytes=8)
labelsdataset = labelsdataset.map(read_label, num_parallel_calls=16)
dataset = tf.data.Dataset.zip((imagedataset, labelsdataset))
return dataset
def get_training_dataset(image_file, label_file, batch_size):
dataset = load_dataset(image_file, label_file)
dataset = dataset.cache() # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset
dataset = dataset.shuffle(5000, reshuffle_each_iteration=True)
dataset = dataset.repeat() # Mandatory for Keras for now
dataset = dataset.batch(batch_size, drop_remainder=True) # drop_remainder is important on TPU, batch size must be fixed
dataset = dataset.prefetch(AUTO) # fetch next batches while training on the current one (-1: autotune prefetch buffer size)
return dataset
def get_validation_dataset(image_file, label_file):
dataset = load_dataset(image_file, label_file)
dataset = dataset.cache() # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset
dataset = dataset.batch(10000, drop_remainder=True) # 10000 items in eval dataset, all in one batch
dataset = dataset.repeat() # Mandatory for Keras for now
return dataset
# instantiate the datasets
training_dataset = get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE)
validation_dataset = get_validation_dataset(validation_images_file, validation_labels_file)
# For TPU, we will need a function that returns the dataset
training_input_fn = lambda: get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE)
validation_input_fn = lambda: get_validation_dataset(validation_images_file, validation_labels_file)
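# Quick shape check on the parsed data, reusing the helper defined above (a sketch):
# images should come out as flat 784-float vectors scaled to [0, 1), and the helper
# converts the one-hot labels back to class indices.
_td, _tl, _vd, _vl = dataset_to_numpy_util(training_dataset, validation_dataset, 1)
print(_td.shape, _tl.shape)  # expected: (1, 784) and (1,)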
N = 24
(training_digits, training_labels,
validation_digits, validation_labels) = dataset_to_numpy_util(training_dataset, validation_dataset, N)
display_digits(training_digits, training_labels, training_labels, "training digits and their labels", N)
display_digits(validation_digits[:N], validation_labels[:N], validation_labels[:N], "validation digits and their labels", N)
font_digits, font_labels = create_digits_from_local_fonts(N)
model = tf.keras.Sequential(
[
tf.keras.layers.Input(shape=(28*28,)),
tf.keras.layers.Dense(200, activation='relu'),
tf.keras.layers.Dense(60, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# print model layers
model.summary()
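# The parameter counts in the summary above can be checked by hand (a sketch of the
# arithmetic): each dense layer has inputs*units weights plus units biases.
print(784*200 + 200,   # first layer: 157,000
      200*60 + 60,     # second layer: 12,060
      60*10 + 10)      # output layer: 610  (total 169,670)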
# utility callback that displays training curves
plot_training = PlotTraining(sample_rate=10, zoom=1)
steps_per_epoch = 60000//BATCH_SIZE # 60,000 items in this dataset
print("Steps per epoch: ", steps_per_epoch)
history = model.fit(training_dataset, steps_per_epoch=steps_per_epoch, epochs=EPOCHS,
validation_data=validation_dataset, validation_steps=1, callbacks=[plot_training])
# recognize digits from local fonts
probabilities = model.predict(font_digits, steps=1)
predicted_labels = np.argmax(probabilities, axis=1)
display_digits(font_digits, predicted_labels, font_labels, "predictions from local fonts (bad predictions in red)", N)
# recognize validation digits
probabilities = model.predict(validation_digits, steps=1)
predicted_labels = np.argmax(probabilities, axis=1)
display_top_unrecognized(validation_digits, predicted_labels, validation_labels, N, 7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The download takes awhile; it's a big distribution!
Step2: This part will take awhile, depending largely on your internet connection. Go grab some coffee!
Step3: This is a great command to test exactly what version of python is being executed. In this case, it's a version I'd installed previous, at /opt/python. That is definitely not the version I just installed. How do I alert my operating system that, when I type a command starting with python, I want it to refer to the local one I just installed?
Step4: As you can see, I already have four Python environments up and running for several different versions of Python.
Step5: Ta-daa!
Step6: Addendum
Step7: There you go! Two identical but completely distinct environments.
Step8: This creates a file myenv2.yaml (which you can name whatever you want) from the "myenv2" environment. Here are the contents of the file
Step9: It's incredibly detailed, down to the exact versions of each package used in the environment. Short of the operating system itself, you can use this to duplicate environments exactly on different computers.
|
<ASSISTANT_TASK:>
Python Code:
# Step 1: right-click the "download" link on the left
# Step 2: select "copy link address"
# Step 3: paste the link into the following bash command, after "wget"
!wget https://repo.continuum.io/archive/Anaconda3-4.4.0-Linux-x86_64.sh
!ls # This will show us the files in our current directory
# This is an easy one-liner, but it's absolutely necessary: it makes the file *executable*
!chmod +x Anaconda3-4.4.0-Linux-x86_64.sh
# The "-b" flag means "batch", which means the install won't stop to ask us pesky questions
# The "-p" flag expects a path where Python will be installed. I've provided a local one
!bash Anaconda3-4.4.0-Linux-x86_64.sh -b -p ./conda-install
!which python
!conda env list
!conda create -n myenv -y python=2.7 scikit-learn numpy scipy matplotlib
!conda env list
!conda create -n myenv2 --clone myenv
!conda env list
!conda env export -n myenv2 -f myenv2.yaml
# %load myenv2.yaml
name: myenv2
channels:
- menpo
- conda-forge
- defaults
dependencies:
- backports_abc=0.5=py27_0
- blas=1.1=openblas
- ca-certificates=2017.7.27.1=0
- certifi=2017.7.27.1=py27_0
- cycler=0.10.0=py27_0
- dbus=1.10.10=3
- expat=2.2.1=0
- fontconfig=2.12.1=4
- freetype=2.7=1
- functools32=3.2.3.2=py27_1
- gettext=0.19.7=1
- glib=2.51.4=0
- gst-plugins-base=1.8.0=0
- gstreamer=1.8.0=2
- icu=58.1=1
- jpeg=9b=0
- libffi=3.2.1=3
- libiconv=1.14=4
- libpng=1.6.28=0
- libxcb=1.12=1
- libxml2=2.9.4=4
- matplotlib=2.0.2=py27_2
- ncurses=5.9=10
- numpy=1.13.1=py27_blas_openblas_200
- openblas=0.2.19=2
- openssl=1.0.2l=0
- pcre=8.39=0
- pip=9.0.1=py27_0
- pyparsing=2.2.0=py27_0
- pyqt=5.6.0=py27_4
- python=2.7.13=1
- python-dateutil=2.6.1=py27_0
- pytz=2017.2=py27_0
- qt=5.6.2=3
- readline=6.2=0
- scikit-learn=0.19.0=py27_blas_openblas_201
- scipy=0.19.1=py27_blas_openblas_202
- setuptools=36.2.2=py27_0
- singledispatch=3.4.0.3=py27_0
- sip=4.18=py27_1
- six=1.10.0=py27_1
- sqlite=3.13.0=1
- ssl_match_hostname=3.5.0.1=py27_1
- subprocess32=3.2.7=py27_0
- tk=8.5.19=2
- tornado=4.5.1=py27_0
- wheel=0.29.0=py27_0
- xorg-libxau=1.0.8=3
- xorg-libxdmcp=1.1.2=3
- xz=5.2.2=0
- zlib=1.2.11=0
- libgfortran=3.0.0=1
- pip:
- backports-abc==0.5
- backports.ssl-match-hostname==3.5.0.1
prefix: /opt/python/envs/myenv2
!conda env create -n myenv3 -f myenv2.yaml
!conda env list
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Below we demonstrate both local and global model interpretability for gradient boosted trees.
Step2: Interpret model
Step4: Local interpretability
Step5: Plot results
Step9: Prettier plotting
Step10: Global feature importances
Step11: ??? What does the x axis represent?
Step12: We can also see how DFCs vary as a feature value varies.
Step13: Visualizing the model's prediction surface
Step15: We can visualize our function
Step16: First let's try to fit a linear model to the data.
Step17: Not very good at all...
|
<ASSISTANT_TASK:>
Python Code:
import time
# We will use some np and pandas for dealing with input data.
import numpy as np
import pandas as pd
# And of course, we need tensorflow.
import tensorflow as tf
from matplotlib import pyplot as plt
from IPython.display import clear_output
tf.__version__
tf.logging.set_verbosity(tf.logging.ERROR)
tf.set_random_seed(123)
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
# Feature columns.
fcol = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return fcol.indicator_column(
fcol.categorical_column_with_vocabulary_list(feature_name,
vocab))
fc = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
fc.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
fc.append(fcol.numeric_column(feature_name,
dtype=tf.float32))
# Input functions.
def make_input_fn(X, y, n_epochs=None):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((X.to_dict(orient='list'), y))
# For training, cycle thru dataset as many times as need (n_epochs=None).
dataset = (dataset
.repeat(n_epochs)
.batch(len(y))) # Use entire dataset since this is such a small dataset.
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, n_epochs=1)
params = {
'n_trees': 50,
'max_depth': 3,
'n_batches_per_layer': 1,
# You must enable center_bias = True to get DFCs. This will force the model to
# make an initial prediction before using any features (e.g. use the mean of
# the training labels for regression or log odds for classification when
# using cross entropy loss).
'center_bias': True
}
est = tf.estimator.BoostedTreesClassifier(fc, **params)
# Train model.
est.train(train_input_fn)
# Evaluation.
results = est.evaluate(eval_input_fn)
clear_output()
pd.Series(results).to_frame()
import matplotlib.pyplot as plt
import seaborn as sns
sns_colors = sns.color_palette('colorblind')
pred_dicts = list(est.experimental_predict_with_explanations(eval_input_fn))
def clean_feature_names(df):
  # Boilerplate code to clean up feature names -- this is unneeded in TF 2.0
df.columns = [v.split(':')[0].split('_indi')[0] for v in df.columns.tolist()]
df = df.T.groupby(level=0).sum().T
return df
# Create DFC Pandas dataframe.
labels = y_eval.values
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
df_dfc = pd.DataFrame([pred['dfc'] for pred in pred_dicts])
df_dfc.columns = est._names_for_feature_id
df_dfc = clean_feature_names(df_dfc)
df_dfc.describe()
# Sum of DFCs + bias == probabality.
bias = pred_dicts[0]['bias']
dfc_prob = df_dfc.sum(axis=1) + bias
np.testing.assert_almost_equal(dfc_prob.values,
probs.values)
import seaborn as sns # Make plotting nicer.
sns_colors = sns.color_palette('colorblind')
def plot_dfcs(example_id):
label, prob = labels[ID], probs[ID]
example = df_dfc.iloc[ID] # Choose ith example from evaluation set.
TOP_N = 8 # View top 8 features.
sorted_ix = example.abs().sort_values()[-TOP_N:].index
ax = example[sorted_ix].plot(kind='barh', color='g', figsize=(10,5))
ax.grid(False, axis='y')
plt.title('Feature contributions for example {}\n pred: {:1.2f}; label: {}'.format(ID, prob, label))
plt.xlabel('Contribution to predicted probability')
ID = 102 # Change this.
plot_dfcs(ID)
def plot_example_pretty(example):
  # Boilerplate code for better plotting :)
def _get_color(value):
    # To make positive DFCs plot green, negative DFCs plot red.
green, red = sns.color_palette()[2:4]
if value >= 0: return green
return red
def _add_feature_values(feature_values, ax):
    # Display feature's values on left of plot.
x_coord = ax.get_xlim()[0]
OFFSET = 0.15
for y_coord, (feat_name, feat_val) in enumerate(feature_values.items()):
t = plt.text(x_coord, y_coord - OFFSET, '{}'.format(feat_val), size=12)
t.set_bbox(dict(facecolor='white', alpha=0.5))
from matplotlib.font_manager import FontProperties
font = FontProperties()
font.set_weight('bold')
t = plt.text(x_coord, y_coord + 1 - OFFSET, 'feature\nvalue',
fontproperties=font, size=12)
TOP_N = 8 # View top 8 features.
sorted_ix = example.abs().sort_values()[-TOP_N:].index # Sort by magnitude.
example = example[sorted_ix]
colors = example.map(_get_color).tolist()
ax = example.to_frame().plot(kind='barh',
color=[colors],
legend=None,
alpha=0.75,
figsize=(10,6))
ax.grid(False, axis='y')
ax.set_yticklabels(ax.get_yticklabels(), size=14)
_add_feature_values(dfeval.iloc[ID].loc[sorted_ix], ax)
ax.set_title('Feature contributions for example {}\n pred: {:1.2f}; label: {}'.format(ID, probs[ID], labels[ID]))
ax.set_xlabel('Contribution to predicted probability', size=14)
plt.show()
return ax
# Plot results.
ID = 102
example = df_dfc.iloc[ID] # Choose ith example from evaluation set.
ax = plot_example_pretty(example)
features, importances = est.experimental_feature_importances(normalize=True)
df_imp = pd.DataFrame(importances, columns=['importances'], index=features)
# For plotting purposes. This is not needed in TF 2.0.
df_imp = clean_feature_names(df_imp.T).T.sort_values('importances', ascending=False)
# Visualize importances.
N = 8
ax = df_imp.iloc[0:N][::-1]\
.plot(kind='barh',
color=sns_colors[0],
title='Gain feature importances',
figsize=(10, 6))
ax.grid(False, axis='y')
plt.tight_layout()
# Plot.
dfc_mean = df_dfc.abs().mean()
sorted_ix = dfc_mean.abs().sort_values()[-8:].index # Average and sort by absolute.
ax = dfc_mean[sorted_ix].plot(kind='barh',
color=sns_colors[1],
title='Mean |directional feature contributions|',
figsize=(10, 6))
ax.grid(False, axis='y')
age = pd.Series(df_dfc.age.values, index=dfeval.age.values).sort_index()
sns.jointplot(age.index.values, age.values);
from numpy.random import uniform, seed
from matplotlib.mlab import griddata
# Create fake data
seed(0)
npts = 5000
x = uniform(-2, 2, npts)
y = uniform(-2, 2, npts)
z = x*np.exp(-x**2 - y**2)
# Prep data for training.
df = pd.DataFrame({'x': x, 'y': y, 'z': z})
xi = np.linspace(-2.0, 2.0, 200),
yi = np.linspace(-2.1, 2.1, 210),
xi,yi = np.meshgrid(xi, yi)
df_predict = pd.DataFrame({
'x' : xi.flatten(),
'y' : yi.flatten(),
})
predict_shape = xi.shape
def plot_contour(x, y, z, **kwargs):
# Grid the data.
plt.figure(figsize=(10, 8))
# Contour the gridded data, plotting dots at the nonuniform data points.
CS = plt.contour(x, y, z, 15, linewidths=0.5, colors='k')
CS = plt.contourf(x, y, z, 15,
vmax=abs(zi).max(), vmin=-abs(zi).max(), cmap='RdBu_r')
plt.colorbar() # Draw colorbar.
# Plot data points.
plt.xlim(-2, 2)
plt.ylim(-2, 2)
zi = griddata(x, y, z, xi, yi, interp='linear')
plot_contour(xi, yi, zi)
plt.scatter(df.x, df.y, marker='.')
plt.title('Contour on training data')
plt.show()
def predict(est):
  # Predictions from a given estimator.
predict_input_fn = lambda: tf.data.Dataset.from_tensors(dict(df_predict))
preds = np.array([p['predictions'][0] for p in est.predict(predict_input_fn)])
return preds.reshape(predict_shape)
fc = [tf.feature_column.numeric_column('x'),
tf.feature_column.numeric_column('y')]
train_input_fn = make_input_fn(df, df.z)
est = tf.estimator.LinearRegressor(fc)
est.train(train_input_fn, max_steps=500);
plot_contour(xi, yi, predict(est))
for n_trees in [1,2,3,10,30,50,100,200]:
est = tf.estimator.BoostedTreesRegressor(fc,
n_batches_per_layer=1,
max_depth=4,
n_trees=n_trees)
est.train(train_input_fn)
plot_contour(xi, yi, predict(est))
plt.text(-1.8, 2.1, '# trees: {}'.format(n_trees), color='w', backgroundcolor='black', size=20)
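# A rough numeric companion to the plots above (a sketch): compare the last fitted
# surface (200 trees) against the true z = x*exp(-x^2 - y^2) on the prediction grid.
zi_true = xi * np.exp(-xi**2 - yi**2)
print('RMSE vs. truth:', np.sqrt(np.mean((predict(est) - zi_true)**2)))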
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'pcmdi', 'sandbox-1', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: You can quickly test if the model builds successfully.
Step3: There are many other types of hyperparameters as well. We can define multiple hyperparameters in the same function.
Step4: As shown below, the hyperparameters are actual values. In fact, they are just functions returning actual values.
Step5: You can also define the hyperparameters in advance and keep your Keras code in a separate function.
Step6: Each of the hyperparameters is uniquely identified by its name (the first argument).
Step7: Start the search
Step8: You can print a summary of the search space
Step9: Before starting the search, let's prepare the MNIST dataset.
Step10: Then, start the search for the best hyperparameter configuration.
Step11: During the search, the model-building function is called with different hyperparameter values in different trials.
Step12: You can also print a summary of the search results.
Step13: You will find detailed logs, checkpoints, etc., in the folder my_dir/helloworld, i.e. directory/project_name.
Step14: Tune model training
Step15: Again, we can do a quick check to see if the code works correctly.
Step16: Tune data preprocessing
Step17: If a hyperparameter is used both in build() and fit(), you can define it in build() and retrieve it with hp.get(hp_name) in fit().
Step18: Retrain the model
Step19: Specify the tuning objective
Step20: Custom metric as the objective
Step21: Run the search with the custom objective.
Step22: If your custom objective is hard to put into a custom metric, you can also evaluate the model yourself in HyperModel.fit() and return the objective value to minimize.
Step23: If you have multiple metrics to track in KerasTuner, but only use one of them as the objective, you can return a dictionary of metrics from HyperModel.fit() and set one of its keys as the objective.
Step24: Tune end-to-end workflows
Step25: Keep Keras code separate
Step26: KerasTuner includes pre-made tunable applications
|
<ASSISTANT_TASK:>
Python Code:
!pip install keras-tuner -q
from tensorflow import keras
from tensorflow.keras import layers
def build_model(hp):
model = keras.Sequential()
model.add(layers.Flatten())
model.add(
layers.Dense(
# Define the hyperparameter.
units=hp.Int("units", min_value=32, max_value=512, step=32),
activation="relu",
)
)
model.add(layers.Dense(10, activation="softmax"))
model.compile(
optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"],
)
return model
import keras_tuner
build_model(keras_tuner.HyperParameters())
def build_model(hp):
model = keras.Sequential()
model.add(layers.Flatten())
model.add(
layers.Dense(
# Tune number of units.
units=hp.Int("units", min_value=32, max_value=512, step=32),
# Tune the activation function to use.
activation=hp.Choice("activation", ["relu", "tanh"]),
)
)
# Tune whether to use dropout.
if hp.Boolean("dropout"):
model.add(layers.Dropout(rate=0.25))
model.add(layers.Dense(10, activation="softmax"))
# Define the optimizer learning rate as a hyperparameter.
learning_rate = hp.Float("lr", min_value=1e-4, max_value=1e-2, sampling="log")
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
return model
build_model(keras_tuner.HyperParameters())
hp = keras_tuner.HyperParameters()
print(hp.Int("units", min_value=32, max_value=512, step=32))
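# The other hyperparameter types behave the same way -- each call returns a plain
# Python value immediately, so it can drive arbitrary logic. The names below are
# illustrative placeholders, not hyperparameters used elsewhere in this guide:
print(hp.Boolean("use_dropout_demo"))                       # False (Boolean default)
print(hp.Choice("activation_demo", ["relu", "tanh"]))       # "relu" (first choice)
print(hp.Float("lr_demo", min_value=1e-4, max_value=1e-2))  # 0.0001 (min_value)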
def call_existing_code(units, activation, dropout, lr):
model = keras.Sequential()
model.add(layers.Flatten())
model.add(layers.Dense(units=units, activation=activation))
if dropout:
model.add(layers.Dropout(rate=0.25))
model.add(layers.Dense(10, activation="softmax"))
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=lr),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
return model
def build_model(hp):
units = hp.Int("units", min_value=32, max_value=512, step=32)
activation = hp.Choice("activation", ["relu", "tanh"])
dropout = hp.Boolean("dropout")
lr = hp.Float("lr", min_value=1e-4, max_value=1e-2, sampling="log")
# call existing model-building code with the hyperparameter values.
model = call_existing_code(
units=units, activation=activation, dropout=dropout, lr=lr
)
return model
build_model(keras_tuner.HyperParameters())
def build_model(hp):
model = keras.Sequential()
model.add(layers.Flatten())
# Tune the number of layers.
for i in range(hp.Int("num_layers", 1, 3)):
model.add(
layers.Dense(
# Tune number of units separately.
units=hp.Int(f"units_{i}", min_value=32, max_value=512, step=32),
activation=hp.Choice("activation", ["relu", "tanh"]),
)
)
if hp.Boolean("dropout"):
model.add(layers.Dropout(rate=0.25))
model.add(layers.Dense(10, activation="softmax"))
learning_rate = hp.Float("lr", min_value=1e-4, max_value=1e-2, sampling="log")
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
return model
build_model(keras_tuner.HyperParameters())
tuner = keras_tuner.RandomSearch(
hypermodel=build_model,
objective="val_accuracy",
max_trials=3,
executions_per_trial=2,
overwrite=True,
directory="my_dir",
project_name="helloworld",
)
tuner.search_space_summary()
from tensorflow import keras
import numpy as np
(x, y), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x[:-10000]
x_val = x[-10000:]
y_train = y[:-10000]
y_val = y[-10000:]
x_train = np.expand_dims(x_train, -1).astype("float32") / 255.0
x_val = np.expand_dims(x_val, -1).astype("float32") / 255.0
x_test = np.expand_dims(x_test, -1).astype("float32") / 255.0
num_classes = 10
y_train = keras.utils.to_categorical(y_train, num_classes)
y_val = keras.utils.to_categorical(y_val, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
tuner.search(x_train, y_train, epochs=2, validation_data=(x_val, y_val))
# Get the top 2 models.
models = tuner.get_best_models(num_models=2)
best_model = models[0]
# Build the model.
# Needed for `Sequential` without specified `input_shape`.
best_model.build(input_shape=(None, 28, 28))
best_model.summary()
tuner.results_summary()
# Get the top 2 hyperparameters.
best_hps = tuner.get_best_hyperparameters(5)
# Build the model with the best hp.
model = build_model(best_hps[0])
# Fit with the entire dataset.
x_all = np.concatenate((x_train, x_val))
y_all = np.concatenate((y_train, y_val))
model.fit(x=x_all, y=y_all, epochs=1)
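# As an extra sanity check (not part of the original guide cell), the retrained
# model can be evaluated on the held-out test split prepared earlier:
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f"Test accuracy: {test_acc:.4f}")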
class MyHyperModel(keras_tuner.HyperModel):
def build(self, hp):
model = keras.Sequential()
model.add(layers.Flatten())
model.add(
layers.Dense(
units=hp.Int("units", min_value=32, max_value=512, step=32),
activation="relu",
)
)
model.add(layers.Dense(10, activation="softmax"))
model.compile(
optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"],
)
return model
def fit(self, hp, model, *args, **kwargs):
return model.fit(
*args,
# Tune whether to shuffle the data in each epoch.
shuffle=hp.Boolean("shuffle"),
**kwargs,
)
hp = keras_tuner.HyperParameters()
hypermodel = MyHyperModel()
model = hypermodel.build(hp)
hypermodel.fit(hp, model, np.random.rand(100, 28, 28), np.random.rand(100, 10))
class MyHyperModel(keras_tuner.HyperModel):
def build(self, hp):
model = keras.Sequential()
model.add(layers.Flatten())
model.add(
layers.Dense(
units=hp.Int("units", min_value=32, max_value=512, step=32),
activation="relu",
)
)
model.add(layers.Dense(10, activation="softmax"))
model.compile(
optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"],
)
return model
def fit(self, hp, model, x, y, **kwargs):
if hp.Boolean("normalize"):
x = layers.Normalization()(x)
return model.fit(
x,
y,
# Tune whether to shuffle the data in each epoch.
shuffle=hp.Boolean("shuffle"),
**kwargs,
)
hp = keras_tuner.HyperParameters()
hypermodel = MyHyperModel()
model = hypermodel.build(hp)
hypermodel.fit(hp, model, np.random.rand(100, 28, 28), np.random.rand(100, 10))
class MyHyperModel(keras_tuner.HyperModel):
def build(self, hp):
image_size = hp.Int("image_size", 10, 28)
inputs = keras.Input(shape=(image_size, image_size))
outputs = layers.Flatten()(inputs)
outputs = layers.Dense(
units=hp.Int("units", min_value=32, max_value=512, step=32),
activation="relu",
)(outputs)
outputs = layers.Dense(10, activation="softmax")(outputs)
model = keras.Model(inputs, outputs)
model.compile(
optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"],
)
return model
def fit(self, hp, model, x, y, validation_data=None, **kwargs):
if hp.Boolean("normalize"):
x = layers.Normalization()(x)
image_size = hp.get("image_size")
cropped_x = x[:, :image_size, :image_size, :]
if validation_data:
x_val, y_val = validation_data
cropped_x_val = x_val[:, :image_size, :image_size, :]
validation_data = (cropped_x_val, y_val)
return model.fit(
cropped_x,
y,
# Tune whether to shuffle the data in each epoch.
shuffle=hp.Boolean("shuffle"),
validation_data=validation_data,
**kwargs,
)
tuner = keras_tuner.RandomSearch(
MyHyperModel(),
objective="val_accuracy",
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="tune_hypermodel",
)
tuner.search(x_train, y_train, epochs=2, validation_data=(x_val, y_val))
hypermodel = MyHyperModel()
best_hp = tuner.get_best_hyperparameters()[0]
model = hypermodel.build(best_hp)
hypermodel.fit(best_hp, model, x_all, y_all, epochs=1)
def build_regressor(hp):
model = keras.Sequential(
[
layers.Dense(units=hp.Int("units", 32, 128, 32), activation="relu"),
layers.Dense(units=1),
]
)
model.compile(
optimizer="adam",
loss="mean_squared_error",
# Objective is one of the metrics.
metrics=[keras.metrics.MeanAbsoluteError()],
)
return model
tuner = keras_tuner.RandomSearch(
hypermodel=build_regressor,
# The objective name and direction.
# Name is the f"val_{snake_case_metric_class_name}".
objective=keras_tuner.Objective("val_mean_absolute_error", direction="min"),
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="built_in_metrics",
)
tuner.search(
x=np.random.rand(100, 10),
y=np.random.rand(100, 1),
validation_data=(np.random.rand(20, 10), np.random.rand(20, 1)),
)
tuner.results_summary()
import tensorflow as tf
class CustomMetric(keras.metrics.Metric):
def __init__(self, **kwargs):
# Specify the name of the metric as "custom_metric".
super().__init__(name="custom_metric", **kwargs)
self.sum = self.add_weight(name="sum", initializer="zeros")
self.count = self.add_weight(name="count", dtype=tf.int32, initializer="zeros")
def update_state(self, y_true, y_pred, sample_weight=None):
values = tf.math.squared_difference(y_pred, y_true)
count = tf.shape(y_true)[0]
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self.dtype)
values *= sample_weight
count *= sample_weight
self.sum.assign_add(tf.reduce_sum(values))
self.count.assign_add(count)
def result(self):
return self.sum / tf.cast(self.count, tf.float32)
def reset_states(self):
self.sum.assign(0)
self.count.assign(0)
def build_regressor(hp):
model = keras.Sequential(
[
layers.Dense(units=hp.Int("units", 32, 128, 32), activation="relu"),
layers.Dense(units=1),
]
)
model.compile(
optimizer="adam",
loss="mean_squared_error",
# Put custom metric into the metrics.
metrics=[CustomMetric()],
)
return model
tuner = keras_tuner.RandomSearch(
hypermodel=build_regressor,
# Specify the name and direction of the objective.
objective=keras_tuner.Objective("val_custom_metric", direction="min"),
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="custom_metrics",
)
tuner.search(
x=np.random.rand(100, 10),
y=np.random.rand(100, 1),
validation_data=(np.random.rand(20, 10), np.random.rand(20, 1)),
)
tuner.results_summary()
class HyperRegressor(keras_tuner.HyperModel):
def build(self, hp):
model = keras.Sequential(
[
layers.Dense(units=hp.Int("units", 32, 128, 32), activation="relu"),
layers.Dense(units=1),
]
)
model.compile(
optimizer="adam", loss="mean_squared_error",
)
return model
def fit(self, hp, model, x, y, validation_data, **kwargs):
model.fit(x, y, **kwargs)
x_val, y_val = validation_data
y_pred = model.predict(x_val)
# Return a single float to minimize.
return np.mean(np.abs(y_pred - y_val))
tuner = keras_tuner.RandomSearch(
hypermodel=HyperRegressor(),
# No objective to specify.
# Objective is the return value of `HyperModel.fit()`.
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="custom_eval",
)
tuner.search(
x=np.random.rand(100, 10),
y=np.random.rand(100, 1),
validation_data=(np.random.rand(20, 10), np.random.rand(20, 1)),
)
tuner.results_summary()
class HyperRegressor(keras_tuner.HyperModel):
def build(self, hp):
model = keras.Sequential(
[
layers.Dense(units=hp.Int("units", 32, 128, 32), activation="relu"),
layers.Dense(units=1),
]
)
model.compile(
optimizer="adam", loss="mean_squared_error",
)
return model
def fit(self, hp, model, x, y, validation_data, **kwargs):
model.fit(x, y, **kwargs)
x_val, y_val = validation_data
y_pred = model.predict(x_val)
# Return a dictionary of metrics for KerasTuner to track.
return {
"metric_a": -np.mean(np.abs(y_pred - y_val)),
"metric_b": np.mean(np.square(y_pred - y_val)),
}
tuner = keras_tuner.RandomSearch(
hypermodel=HyperRegressor(),
# Objective is one of the keys.
# Maximize the negative MAE, equivalent to minimize MAE.
objective=keras_tuner.Objective("metric_a", "max"),
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="custom_eval_dict",
)
tuner.search(
x=np.random.rand(100, 10),
y=np.random.rand(100, 1),
validation_data=(np.random.rand(20, 10), np.random.rand(20, 1)),
)
tuner.results_summary()
class MyTuner(keras_tuner.RandomSearch):
def run_trial(self, trial, *args, **kwargs):
# Get the hp from trial.
hp = trial.hyperparameters
# Define "x" as a hyperparameter.
x = hp.Float("x", min_value=-1.0, max_value=1.0)
# Return the objective value to minimize.
return x * x + 1
tuner = MyTuner(
# No hypermodel or objective specified.
max_trials=20,
overwrite=True,
directory="my_dir",
project_name="tune_anything",
)
# No need to pass anything to search()
# unless you use them in run_trial().
tuner.search()
print(tuner.get_best_hyperparameters()[0].get("x"))
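# Since the objective x * x + 1 is minimized at x = 0, the best score found by the
# 20 random trials should be close to 1.0. Inspecting it via the oracle is an
# illustrative extra step, not part of the original example:
best_trial = tuner.oracle.get_best_trials(num_trials=1)[0]
print(best_trial.score)  # expected to be close to 1.0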
import os
def keras_code(units, optimizer, saving_path):
# Build model
model = keras.Sequential(
[layers.Dense(units=units, activation="relu"), layers.Dense(units=1),]
)
model.compile(
optimizer=optimizer, loss="mean_squared_error",
)
# Prepare data
x_train = np.random.rand(100, 10)
y_train = np.random.rand(100, 1)
x_val = np.random.rand(20, 10)
y_val = np.random.rand(20, 1)
# Train & eval model
model.fit(x_train, y_train)
# Save model
model.save(saving_path)
# Return a single float as the objective value.
# You may also return a dictionary
# of {metric_name: metric_value}.
y_pred = model.predict(x_val)
return np.mean(np.abs(y_pred - y_val))
class MyTuner(keras_tuner.RandomSearch):
def run_trial(self, trial, **kwargs):
hp = trial.hyperparameters
return keras_code(
units=hp.Int("units", 32, 128, 32),
optimizer=hp.Choice("optimizer", ["adam", "adadelta"]),
saving_path=os.path.join("/tmp", trial.trial_id),
)
tuner = MyTuner(
max_trials=3, overwrite=True, directory="my_dir", project_name="keep_code_separate",
)
tuner.search()
# Retraining the model
best_hp = tuner.get_best_hyperparameters()[0]
keras_code(**best_hp.values, saving_path="/tmp/best_model")
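# The retrained model was saved by `keras_code()` via `model.save()`, so it can be
# reloaded later for inference. This reload is an illustrative addition, not part
# of the original workflow above:
reloaded_model = keras.models.load_model("/tmp/best_model")
print(reloaded_model.predict(np.random.rand(3, 10)).shape)  # (3, 1)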
from keras_tuner.applications import HyperResNet
hypermodel = HyperResNet(input_shape=(28, 28, 1), classes=10)
tuner = keras_tuner.RandomSearch(
hypermodel,
objective="val_accuracy",
max_trials=2,
overwrite=True,
directory="my_dir",
project_name="built_in_hypermodel",
)
tuner.search(
x_train[:100], y_train[:100], epochs=1, validation_data=(x_val[:100], y_val[:100])
)
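# Retrieve the best ResNet found by the search -- an illustrative extra step,
# analogous to the `get_best_models()` call used earlier for the custom models:
best_resnet = tuner.get_best_models(num_models=1)[0]
best_resnet.summary()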
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'dwd', 'sandbox-3', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: images holds the data used for training and testing, and label holds the correct answer for each image record; every handwritten digit image is a 28 x 28 grayscale bitmap
Step2: Build the neural network that we are going to train
Step3: The code above is the core of the network: two fully connected layers, with a 512-neuron layer feeding a 10-neuron output layer. The output layer uses softmax to express a probability distribution over the digits 0-9, so the 10 values sum to 1. Below we compile the network; the individual parameters will be covered in detail later.
Step4: Below we normalize the data to values between 0 and 1 and reshape it to (60000, 28*28) so it can be fed into the input layer defined above.
Step5: Since we use the categorical_crossentropy loss function, the label data has to be converted to a categorical (one-hot) format, as follows:
Step6: Train the model; the training accuracy should end up around 0.989
Step7: Evaluate the trained model on the test data; the test accuracy is generally around 0.977
Step8: Why is the training accuracy higher than the test accuracy? With this data, the model slightly overfits the training data during training. This is generally normal; later we can improve accuracy by tuning parameters or using other methods.
Step9: The following code plots how the accuracy changes during training. When the accuracy shows little change in the later epochs, it means the model quickly converged to a good solution in the hypothesis space.
|
<ASSISTANT_TASK:>
Python Code:
from keras.datasets import mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images.shape
len(train_labels)
train_labels
test_images.shape
len(test_labels)
test_labels
from keras import models
from keras import layers
network = models.Sequential()
network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))
network.add(layers.Dense(10, activation='softmax'))
network.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
fix_train_images = train_images.reshape((60000, 28 * 28)).astype('float32') / 255
fix_test_images = test_images.reshape((10000, 28 * 28)).astype('float32') / 255
from keras.utils import to_categorical
fix_train_labels = to_categorical(train_labels)
fix_test_labels = to_categorical(test_labels)
result = network.fit(
fix_train_images,
fix_train_labels,
epochs=20,
batch_size=128,
validation_data=(fix_test_images, fix_test_labels))
test_loss, test_acc = network.evaluate(fix_test_images, fix_test_labels)
print('test_loss:', test_loss)
print('test_acc:', test_acc)
history_dict = result.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
epochs = range(1, len(loss_values) + 1)
import matplotlib.pyplot as plt
plt.plot(epochs, loss_values, 'bo', label='Training loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf()
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
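# The gap between the training and test accuracy noted above is mild overfitting.
# A minimal, hypothetical sketch of one mitigation (not part of the original
# notebook): add a Dropout layer between the two Dense layers and retrain.
regularized = models.Sequential()
regularized.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))
regularized.add(layers.Dropout(0.3))  # randomly zeroes 30% of activations during training
regularized.add(layers.Dense(10, activation='softmax'))
regularized.compile(optimizer='rmsprop',
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])
# regularized.fit(fix_train_images, fix_train_labels, epochs=20, batch_size=128,
#                 validation_data=(fix_test_images, fix_test_labels))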
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Boolean
Step2: Numbers
Step3: When dividing two numbers (integers or floats), we can use regular division (/) or integer division (//), where the latter keeps only the integer part of the result
Step4: There is also the power operator, which raises one number to the power of another.
Step5: Python, like many languages, has a module that implements several mathematical functions (square root, logarithm, ceiling, floor, etc.)
Step6: Python also has a module of functions for random operations
Step7: Strings
Step8: We can easily replace occurrences of a character/substring using the replace method
Step9: Another useful string-cleaning operation is strip, which removes the whitespace at the beginning and end of the string
Step11: We can also create longer, multi-line strings using a total of 6 quotes (3 to open and 3 to close)
Step12: When a string contains multiple pieces of information and we want to separate them, we can use the split function, passing the character/substring to be used to divide the string
Step13: Interesting fact
Step14: There are a few ways to interpolate strings, that is, to inject values into them
Step15: Another way to interpolate strings is using format
Step16: Conditions
Step17: Ternary assignment
Step18: Loops
Step19: The for loop works by iterating over a collection/sequence of values. There are several ways to do this
Step20: Functions
Step21: Manipulating functions
Step22: There are also anonymous functions (called lambdas in Python). An anonymous function has only a signature and a body, without a defined name. In Python they are called lambdas because they are single-expression functions (unlike languages such as Javascript and Scala, which allow multi-line anonymous functions).
Step23: Dates
Step24: Lists
Step25: There are several ways to access the elements of a list
Step26: The len operator can be used to measure the size of objects holding multiple items (strings, lists, etc.)
Step27: We can add up the elements of a list simply by using the sum function
Step28: We can also check whether an element belongs to a list using the in operator
Step29: In Python, lists are mutable, that is, they can be modified.
Step30: By default, Python variables (when pointing to more complex data types) are pointers, that is, they are a reference to data allocated in memory. So when we assign the value of a variable A to a variable B, we are just saying that B points to the same data as A
Step31: Although lists are mutable, element-access operations (with the [ ] operator) return new objects. So we can make a copy of a list as follows
Step32: There are two ways to sort a list in Python
Step33: The random module also contains a set of functions for working with lists. We can shuffle them, pick a random element, or draw a sample of size n
Step34: The default string representation of a list is the sequence of its elements' strings, separated by ', ' and surrounded by brackets. The string method join uses a string as a separator to concatenate a list, as in the example below
Step35: Note
Step36: Functional operations on lists
Step37: map returns an iterator, that is, its values only materialize when they are requested (in a for loop, another map, or a cast to list), and once consumed the iterator is empty
Step38: Filtering
Step39: Again we get an iterator as the return value. We can use the same approaches shown for the map iterator
Step40: Combining transformation and filtering
Step41: Tuples
Step42: Dictionaries
Step43: In Python, we can access a dictionary's keys and values separately
|
<ASSISTANT_TASK:>
Python Code:
a = None
if a is None:
print('nulo')
a = True
b = False
c = True
print(a == b)
print(a == c)
print(type(1))
print(type(1.2))
print("Divisao: ",1 / 2)
print("Divisao inteira: ",1 // 2)
print(2 ** 3)
print(25 ** (1/2))
import math
print(math.sqrt(2))
print(math.log(2))
print(math.ceil(2 ** 0.5))
print(math.floor(2 ** 0.5))
import random
print(random.random()) # normalized between 0 and 1
print(random.randint(1,10)) # closed interval (both endpoints included)
single_quoted_string = 'Data Science'
print(single_quoted_string, type(single_quoted_string))
print(len(single_quoted_string)) # length of the string
print(single_quoted_string.upper()) # convert characters to uppercase
print(single_quoted_string.lower()) # convert characters to lowercase
str1 = 'Hoje_vai_chover_novamente.'.replace('_', ' ')
print(str1)
print(' Hello '.strip()) # removes whitespace at the beginning and at the end
print(' Hello '.lstrip()) # removes whitespace at the beginning
print(' Hello '.rstrip()) # removes whitespace at the end
multi_line_string = """linha 1
linha 2
linha 3"""
print(multi_line_string)
print(repr(multi_line_string))
multi_line_string.split("\n")
print("Big" + " " + "Data")
print('Repete ' * 5)
# %d and %f indicate that an integer or a floating point number, respectively, will be inserted
print("%d/%f/%d" % (2, 4.5, 6))
print("%s/%s/%s" % ('a', 2, False)) # %s indicates that something will be inserted as a string
print("{}/{}/{}".format(2, 4, 6))
print("{:2d}/{:2d}/{:2d}".format(2, 4, 16))
print("{:02d}/{:02d}/{:02d}".format(2, 4, 160))
print('{:.2f}'.format(99.8765))
print('{:.0f}'.format(99.8765))
valor = 99
if valor == 99:
print('veloz')
elif valor > 200:
print('muito veloz')
else:
print('lento')
x = 5
par_ou_impar = "par" if x % 2 == 0 else "impar"
print(par_ou_impar)
x = 0
while x < 5:
print(x)
x += 1
# defining an interval
for i in range(10): # from 0 to 9, stepping by 1
    print(i, end= " ")
print()
for i in range(5,10): # from 5 to 9, stepping by 1
    print(i, end= " ")
print()
for i in range(0,10,2): # from 0 to 9, stepping by 2
    print(i, end= " ")
print()
# using a pre-defined sequence
a = [1, 3, 4, 5, 7]
for i in a:
    print(i)
# the enumerate function returns a list of pairs (i,x), where:
# i is the index (0, 1, 2, ...)
# x is the element of the original sequence
a = [1, 3, 4, 5, 7]
for indice, valor in enumerate(a):
    print(indice, valor)
# we can also iterate over a collection without using the value of each step
for _ in range(5):
    print('oi')
# there are also two operators available inside a for loop:
# continue - ends the current step and moves on to the next one
# break - ends the loop
for x in range(10):
if x == 3:
continue
if x == 5:
break
print(x)
def soma(a, b):
return a + b
print(soma(3, 5))
print(soma('casa ', 'organizada'))
def f1():
print("Function 1")
def f2():
print("Function 2")
def chamar_funcao(f):
f()
chamar_funcao(f1)
chamar_funcao(f2)
chamar_funcao(lambda : print("Lambda function"))
from datetime import datetime
import time
inicio = datetime.now() # gets the current timestamp
print(inicio)
for i in range(2):
    time.sleep(1) # applies a delay, in seconds
fim = datetime.now()
print(fim)
print("Tempo de execução: {}".format(fim - inicio))
lista = [1, 2, 3, 4, 5]
print(lista)
lista[2] # element of the list
lista[-1] # last element
lista[1:3] # sublist
lista[:3] # first 3 elements
lista[-2:] # last 2 elements
lista[1:-1] # from the second to the second-to-last element
len(lista)
sum(lista)
print(10 in lista)
print(5 in lista)
lista.append(7) # adds an element to the list
lista.append(6)
lista.append(7)
print(lista)
lista.remove(7) # removes the first element whose value is 7
print(lista)
lista.extend([8, 9, -10]) # extends the list with another list
lista
lista2 = lista # lista2 references lista
lista2[-1] = 10
print(lista)
print(lista2)
lista3 = lista[:] # lista3 is a new list with the same content as lista
lista3[-1] = -10
print(lista)
print(lista3)
lista4 = [-4, 1, -2, 3]
print(sorted(lista4)) # sorts without changing the list
print(sorted(lista4, reverse=True))
print(sorted(lista4, key=abs))
print(lista4)
lista4.sort() # changes the original list
print(lista4)
import random
a = ['a', 'casa', 'está', 'muito', 'bem', 'organizada']
random.shuffle(a) # mutable (shuffles the list in place)
print(a)
random.shuffle(a)
print(a)
random.choice(a) # with replacement
numeros = range(1, 100)
random.sample(numeros, 5) # without replacement
print(a)
print(' '.join(a))
b = [1, 'a', 25, 'abc']
print(b)
print(' '.join(b))
a = [1, 2, 3, 4, 5]
doubled_a = map(lambda x: 2*x, a)
doubled_a
# uncomment one of the alternatives below to check how it works
doubled_a = map(lambda x: 2*x, a)
# for x in doubled_a:
# print(x, end=" ")
# str_doubled_a = map(lambda x: str(x), doubled_a)
# print(" ".join(str_doubled_a))
# list(doubled_a)
even_a = filter(lambda x: x%2 == 0, a)
even_a
# uncomment one of the alternatives below to check how it works
even_a = filter(lambda x: x%2 == 0, a)
# for x in even_a:
# print(x, end=" ")
# str_even_a = map(lambda x: str(x), even_a)
# print(" ".join(str_even_a))
# list(even_a)
quadrados = [x * x for x in range(5)]
print(quadrados)
quadrados_dos_pares = [x * x for x in range(5) if x%2 == 0]
print(quadrados_dos_pares)
tupla = (1, 2, 3, 4)
print(tupla)
dicionario = { 'nome': 'Fulano', 'sobrenome': 'Da Silva' }
print(dicionario['nome'])
print(dicionario['sobrenome'])
print(dicionario.keys())
print(dicionario.values())
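# A small additional sketch (not in the original material): dictionaries also
# expose items() for iterating over key/value pairs, and get() for lookups with a default.
for key, value in dicionario.items():
    print(key, '->', value)
print(dicionario.get('age', 'missing key'))  # 'age' is absent, so the default is returned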
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: PART 1
Step2: This table shows the top 10 water consuming counties, the population, the amount of the population that is connected to public water (PublicSupply), and the total water used by each county.
Step3: Most Water Consuming Counties in Florida
Step4: Try to change the histogram so that it displays the County and the Population Total. (Right now it is displaying County and Water Use Total)
Step5: Rainfall in February 2005 (Inches)
Step6: Try to change the histogram to display the data in descending order.
Step7: PART 3
Step8: Water Level in February
Step9: Water Level in July
|
<ASSISTANT_TASK:>
Python Code:
# Import modules that contain functions we need
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
# Our data is a table and is defined as the word 'data'.
# 'data' is set equal to the .csv file that is read by the pandas function.
# The .csv file must be in the same directory as the program.
#data = pd.read_csv("Public Water Supply FL 2010.csv")
# You can also use external links to .xls, .csv, or .txt files and would import using the same function but replacing the
# file name with the webpage. For example:
data = pd.read_csv("https://gist.githubusercontent.com/GoodmanSciences/9d53d0874281a61354cc8a9a962cb926/raw/e379c22e667aa309cc02048bd2b7bb31ce540d60/Public%2520Water%2520Supply%2520FL%25202010.csv")
# displays the first few rows of the table
data.head(4)
# Set variables for scatter plot
x = data.Population
y = data.WaterUsed
fig = plt.figure(figsize=(15, 6))
plt.scatter(x,y)
plt.xlim(0,3000000)
plt.ylim(0,350)
plt.title('The Relationship Between Population and How Much Water a County Consumes Each Year')
plt.xlabel('Population (individuals)')
plt.ylabel('Water Used (million gallons)')
# This actually shows the plot
plt.show()
# Creates a new dataset for County
place = data.groupby("County", as_index = False).sum()
# Organizes by County with the highest water usage in descending order
# Only displays the top 10 highest water consuming counties by using .head(10)
mostwater = place.sort_values(by="WaterUsed", ascending = False).head(10)
mostwater
# Displays a histogram of the top 10 water consuming counties in ascending order
mostwater.sort_values(by="WaterUsed", ascending=True).plot(x="County", y="WaterUsed", kind="barh", title="Top 10 Water Consuming Counties", legend=False);
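# A possible answer to the "Population Total" exercise in the task description
# (an illustrative sketch, not part of the original notebook): plot the summed
# Population for the same ten counties instead of WaterUsed.
mostwater.sort_values(by="Population", ascending=True).plot(x="County", y="Population", kind="barh", title="Population of the Top 10 Water Consuming Counties", legend=False);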
# Imports more csv files locally
#feb = pd.read_csv("Feb2005_FL_rainfall.csv")
#july = pd.read_csv("July2005_FL_rainfall.csv")
# Imports more csv files from the web
july = pd.read_csv("https://gist.githubusercontent.com/GoodmanSciences/354fa30fb1e506c055621b893b26ebe8/raw/523e483ae4534c9432f91e5d5b7f9fb0356e95e1/Rainfall%2520FL%2520Jul2005.csv")
feb = pd.read_csv("https://gist.githubusercontent.com/GoodmanSciences/7088ff6b7b8e915a87ee987f3b767641/raw/a76a0dd975f95e6c0c5e6ee810e6f6e66faeca9b/Rainfall%2520FL%2520Feb2005.csv")
feb.head()
# Plots rainfall in ascending order
feb.sort_values(by="Monthly Total", ascending=True).plot(x="County", y="Monthly Total", kind="barh", title="Rainfall in February (Inches)", legend=False);
july.head()
july.sort_values(by="Monthly Total", ascending=True).plot(x="County", y="Monthly Total", kind="barh", title="Rainfall in July (Inches)", legend=False);
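# A possible answer to the ordering exercise (an illustrative sketch, not part of
# the original notebook): setting ascending=False plots the counties in descending
# order of rainfall instead.
feb.sort_values(by="Monthly Total", ascending=False).plot(x="County", y="Monthly Total", kind="barh", title="Rainfall in February (Inches)", legend=False);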
from IPython.display import Image
from IPython.core.display import HTML
Image(url= 'https://preview.ibb.co/g7Z6sa/Average_Monthly_Water_Consumption.png')
Image(url= 'https://floridamaps.files.wordpress.com/2015/03/florida-counties.jpg')
#Double-click to make this image GINORMOUS
# Imports another csv file locally
#level = pd.read_csv("Lake Apopka Waterlevel 2005.csv")
# Imports another csv file from the web
level = pd.read_csv("https://gist.githubusercontent.com/GoodmanSciences/e63b6cb68cd6ef5235dc8c113ea9995a/raw/39139535f7ef05057ecce1126ea336ca7bcfb879/Lake%2520Apopka%2520Waterlevel%25202005.csv")
# Sets Date as index
lev2 = level.set_index("Date")
# Displays only Feb 1st through the 28th
lev2.loc["2/1/2005":"2/28/2005", :]
# Displays only July 1st through the 7th
lev2.loc["7/1/2005":"7/7/2005", :]
# Plot of all values in level dataset
level.plot('Date', 'Water Level')
Image(url= 'http://www.floridacountiesmap.com/graphics/orange.gif')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The function depth_limited_search tries to find a solution to the search problem
Step2: Solving the Sliding Puzzle
|
<ASSISTANT_TASK:>
Python Code:
def search(start, goal, next_states):
limit = 36
while True:
Path = depth_limited_search(start, goal, next_states, [start], { start }, limit)
if Path is not None:
return Path
limit += 1
print(f'limit = {limit}')
def depth_limited_search(state, goal, next_states, Path, PathSet, limit):
if state == goal:
return Path
if len(Path) == limit:
return None
for ns in next_states(state):
if ns not in PathSet:
Path .append(ns)
PathSet.add(ns)
Result = depth_limited_search(ns, goal, next_states, Path, PathSet, limit)
if Result is not None:
return Result
Path .pop()
PathSet.remove(ns)
return None
%run Sliding-Puzzle.ipynb
%load_ext memory_profiler
%%time
Path=search(start, goal, next_states)
len(Path)
animation(Path)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new Bundle.
Step2: Semi-Detached Systems
Step3: We can view the constraint on requiv by accessing the constraint
Step4: Now whenever any of the relevant parameters (q, ecc, syncpar, sma) are changed, the value of requiv will change to match the critical value as defined by requiv_max.
Step5: Note that the point region in semi-detached systems often falls outside of the atmosphere tables. Since this requires the full mesh, this cannot be checked in advance with b.run_checks but will raise an error during b.run_compute.
Step6: In this case, it is necessary to fallback on blackbody atmospheres and manually provide the limb-darkening function and coefficients
Step7: The default values of the ld_func and ld_coeffs should be changed to appropriate values for a given system, but should still compute without errors.
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.4,<2.5"
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
b.add_constraint('semidetached', 'primary')
b['requiv@constraint@primary']
b['requiv_max@constraint@primary']
b.add_dataset('lc', compute_phases=phoebe.linspace(0,1,101))
try:
b.run_compute()
except Exception as err:
print(err)
b.set_value('atm', component='primary', value='blackbody')
b.set_value('ld_mode', component='primary', value='manual')
print(b.filter(qualifier=['ld_func', 'ld_coeffs']))
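# A hypothetical illustration (placeholder values, not recommendations): with
# ld_mode set to 'manual', a specific limb-darkening law and matching coefficients
# could be chosen explicitly before computing, e.g.:
# b.set_value('ld_func', component='primary', value='linear')
# b.set_value('ld_coeffs', component='primary', value=[0.5])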
b.run_compute()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: With the counts stored in a Counter object, let's now quickly print out the top ten libraries and their respective counts.
Step2: Nothing there seems particularly surprising, as this is run from my laptop, meaning that a lot of the scripts written are statistical or just simple automations, and the above libraries are pretty useful for that. I'd be interested to run this same script on my GPU desktop, as it is there that I run any neural networks and heavy machine learning files. I suspect PyTorch and TensorFlow may start to appear in the top few items then.
|
<ASSISTANT_TASK:>
Python Code:
import os
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
directory = '/home/tpin3694/Documents/'
python_files = [os.path.join(root, name)
for root, dirs, files in os.walk(directory)
for name in files if name.endswith(('.py'))]
print('Found {} Python files\n'.format(len(python_files)))
libraries = []
error_count = 0
for file in python_files:
try:
file_import = open(file, 'r')
file_data = file_import.readlines()
for line in file_data:
if line.startswith(('import')) or line.startswith(('from')):
libraries.append(line.split()[1])
except UnicodeDecodeError:
error_count += 1
print('{}% files raising encoding errors.\n'.format(round(100*error_count/len(python_files),2)))
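# An optional refinement (an assumption about intent, not part of the original
# script): lines such as "import matplotlib.pyplot" record the submodule, so
# grouping by the top-level package gives a cleaner count. Counter(top_level_libraries)
# could then be used in place of Counter(libraries) below.
top_level_libraries = [lib.split('.')[0] for lib in libraries]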
library_counts = Counter(libraries)
print('Top 15 Libraries')
for label, count in library_counts.most_common(15):
print('{}: {}'.format(label, count))
labels, counts = zip(*library_counts.most_common(15))
plt.figure(figsize=(12, 8))
plt.bar(labels, counts)
plt.xticks(rotation=60);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Graded = 8/8
Step2: After writing code that returns a result, we now automate it for the various dates using a function
Step3: 2) What are all the different book categories the NYT ranked in June 6, 2009? How about June 6, 2015?
Step4: 4) What's the title of the first story to mention the word 'hipster' in 1995? What's the first paragraph?
Step5: 5) How many times was gay marriage mentioned in the NYT between 1950-1959, 1960-1969, 1970-1979, 1980-1989, 1990-1999, 2000-2009, and 2010-present?
Step6: 6) What section talks about motorcycles the most?
Step7: 7) How many of the last 20 movies reviewed by the NYT were Critics' Picks? How about the last 40? The last 60?
Step8: 8) Out of the last 40 movie reviews from the NYT, which critic has written the most reviews?
|
<ASSISTANT_TASK:>
Python Code:
#API Key: 0c3ba2a8848c44eea6a3443a17e57448
import requests
bestseller_response = requests.get('http://api.nytimes.com/svc/books/v2/lists/2009-05-10/hardcover-fiction?api-key=0c3ba2a8848c44eea6a3443a17e57448')
bestseller_data = bestseller_response.json()
print("The type of bestseller_data is:", type(bestseller_data))
print("The keys of bestseller_data are:", bestseller_data.keys())
# Exploring the data structure further
bestseller_books = bestseller_data['results']
print(type(bestseller_books))
print(bestseller_books[0])
for book in bestseller_books:
#print("NEW BOOK!!!")
#print(book['book_details'])
#print(book['rank'])
if book['rank'] == 1:
for element in book['book_details']:
print("The book that topped the hardcover fiction NYT Beststeller list on Mothers Day in 2009 was", element['title'], "written by", element['author'])
def bestseller(x, y):
bestsellerA_response = requests.get('http://api.nytimes.com/svc/books/v2/lists/'+ x +'/hardcover-fiction?api-key=0c3ba2a8848c44eea6a3443a17e57448')
bestsellerA_data = bestsellerA_response.json()
bestsellerA_books = bestsellerA_data['results']
for book in bestsellerA_books:
if book['rank'] == 1:
for element in book['book_details']:
print("The book that topped the hardcover fiction NYT Beststeller list on", y, "was",
element['title'], "written by", element['author'])
bestseller('2009-05-10', "Mothers Day 2009")
bestseller('2010-05-09', "Mothers Day 2010")
bestseller('2009-06-21', "Fathers Day 2009")
bestseller('2010-06-20', "Fathers Day 2010")
#Alternative solution would be, instead of putting this code into a function to loop it:
#1) to create a dictionary called dates containing y as keys and x as values to these keys
#2) to take the above code and nest it into a for loop that loops through the dates, each time using the next key:value pair
# for date in dates:
# replace value in URL and run the above code used inside the function
# replace key in print statement
# STEP 1: Exploring the data structure using just one of the dates from the question
bookcat_response = requests.get('http://api.nytimes.com/svc/books/v2/lists/names.json?published-date=2009-06-06&api-key=0c3ba2a8848c44eea6a3443a17e57448')
bookcat_data = bookcat_response.json()
print(type(bookcat_data))
print(bookcat_data.keys())
bookcat = bookcat_data['results']
print(type(bookcat))
print(bookcat[0])
# STEP 2: Writing a loop that runs the same code for both dates (no function, as only one variable)
dates = ['2009-06-06', '2015-06-15']
for date in dates:
bookcatN_response = requests.get('http://api.nytimes.com/svc/books/v2/lists/names.json?published-date=' + date + '&api-key=0c3ba2a8848c44eea6a3443a17e57448')
bookcatN_data = bookcatN_response.json()
bookcatN = bookcatN_data['results']
category_listN = []
for category in bookcatN:
category_listN.append(category['display_name'])
print(" ")
print("THESE WERE THE DIFFERENT BOOK CATEGORIES THE NYT RANKED ON", date)
for cat in category_listN:
print(cat)
# STEP 1a: EXPLORING THE DATA
test_response = requests.get('http://api.nytimes.com/svc/search/v2/articlesearch.json?q=Gaddafi+Libya&api-key=0c3ba2a8848c44eea6a3443a17e57448')
test_data = test_response.json()
print(type(test_data))
print(test_data.keys())
test_hits = test_data['response']
print(type(test_hits))
print(test_hits.keys())
# STEP 1b: EXPLORING THE META DATA
test_hits_meta = test_data['response']['meta']
print("The meta data of the search request is a", type(test_hits_meta))
print("The dictionary despot_hits_meta has the following keys:", test_hits_meta.keys())
print("The search requests with the TEST URL yields total:")
test_hit_count = test_hits_meta['hits']
print(test_hit_count)
# STEP 2: BUILDING THE CODE TO LOOP THROUGH DIFFERENT SPELLINGS
despot_names = ['Gadafi', 'Gaddafi', 'Kadafi', 'Qaddafi']
for name in despot_names:
despot_response = requests.get('http://api.nytimes.com/svc/search/v2/articlesearch.json?q=' + name +'+Libya&api-key=0c3ba2a8848c44eea6a3443a17e57448')
despot_data = despot_response.json()
despot_hits_meta = despot_data['response']['meta']
despot_hit_count = despot_hits_meta['hits']
print("The NYT has referred to the Libyan despot", despot_hit_count, "times using the spelling", name)
hip_response = requests.get('http://api.nytimes.com/svc/search/v2/articlesearch.json?q=hipster&fq=pub_year:1995&api-key=0c3ba2a8848c44eea6a3443a17e57448')
hip_data = hip_response.json()
print(type(hip_data))
print(hip_data.keys())
# STEP 1: EXPLORING THE DATA STRUCTURE:
hipsters = hip_data['response']
#print(hipsters)
#hipsters_meta = hipsters['meta']
#print(type(hipsters_meta))
hipsters_results = hipsters['docs']
print(hipsters_results[0].keys())
#print(type(hipsters_results))
#STEP 2: LOOPING FOR THE ANSWER:
earliest_date = '1996-01-01'
for mention in hipsters_results:
if mention['pub_date'] < earliest_date:
earliest_date = mention['pub_date']
print("This is the headline of the first text to mention 'hipster' in 1995:", mention['headline']['main'])
print("It was published on:", mention['pub_date'])
print("This is its lead paragraph:")
print(mention['lead_paragraph'])
# data structure requested same as in task 3, just this time loop though different date ranges
def countmention(a, b, c):
if b == ' ':
marry_response = requests.get('http://api.nytimes.com/svc/search/v2/articlesearch.json?q="gay marriage"&begin_date='+ a +'&api-key=0c3ba2a8848c44eea6a3443a17e57448')
else:
marry_response = requests.get('http://api.nytimes.com/svc/search/v2/articlesearch.json?q="gay marriage"&begin_date='+ a +'&end_date='+ b +'&api-key=0c3ba2a8848c44eea6a3443a17e57448')
marry_data = marry_response.json()
marry_hits_meta = marry_data['response']['meta']
marry_hit_count = marry_hits_meta['hits']
print("The count for NYT articles mentioning 'gay marriage' between", c, "is", marry_hit_count)
#supposedly, there's a way to solve the following part in a more efficient way, but those I tried did not work,
#so it ended up being more time-efficient just to type it:
countmention('19500101', '19591231', '1950 and 1959')
countmention('19600101', '19691231', '1960 and 1969')
countmention('19700101', '19791231', '1970 and 1979')
countmention('19800101', '19891231', '1980 and 1989')
countmention('19900101', '19991231', '1990 and 1999')
countmention('20000101', '20091231', '2000 and 2009')
countmention('20100101', ' ', '2010 and present')
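# A more compact alternative to the repeated calls above (an illustrative sketch,
# not part of the original homework): loop over the date ranges instead.
decades = [('19500101', '19591231', '1950 and 1959'),
           ('19600101', '19691231', '1960 and 1969'),
           ('19700101', '19791231', '1970 and 1979'),
           ('19800101', '19891231', '1980 and 1989'),
           ('19900101', '19991231', '1990 and 1999'),
           ('20000101', '20091231', '2000 and 2009'),
           ('20100101', ' ', '2010 and present')]
for start, end, label in decades:
    countmention(start, end, label)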
moto_response = requests.get('http://api.nytimes.com/svc/search/v2/articlesearch.json?q=motorcycle&facet_field=section_name&facet_filter=true&api-key=0c3ba2a8848c44eea6a3443a17e57448')
moto_data = moto_response.json()
#STEP 1: EXPLORING DATA STRUCTURE
#print(type(moto_data))
#print(moto_data.keys())
#print(moto_data['response'])
#print(moto_data['response'].keys())
#print(moto_data['response']['facets'])
#STEP 2: Code to get to the answer
moto_facets = moto_data['response']['facets']
#print(moto_facets)
#print(moto_facets.keys())
moto_sections = moto_facets['section_name']['terms']
#print(moto_sections)
#this for loop is not necessary, but it's nice to know the counts
#(also to check whether the next loop identifies the right section)
for section in moto_sections:
print("The section", section['term'], "mentions motorcycles", section['count'], "times.")
most_motorcycles = 0
for section in moto_sections:
if section['count'] > most_motorcycles:
most_motorcycles = section['count']
print(" ")
print("That means the section", section['term'], "mentions motorcycles the most, namely", section['count'], "times.")
picks_offset_values = [0, 20, 40]
picks_review_list = []
for value in picks_offset_values:
picks_response = requests.get ('http://api.nytimes.com/svc/movies/v2/reviews/search.json?&offset=' + str(value) + '&api-key=0c3ba2a8848c44eea6a3443a17e57448')
picks_data = picks_response.json()
#STEP 1: EXPLORING THE DATA STRUCTURE (without the loop)
#print(picks_data.keys())
#print(picks_data['num_results'])
#print(picks_data['results'])
#print(type(picks_data['results']))
#print(picks_data['results'][0].keys())
#STEP 2: After writing a test code (not shown) without the loop, now CODING THE LOOP
last_reviews = picks_data['num_results']
picks_results = picks_data['results']
critics_pick_count = 0
for review in picks_results:
if review['critics_pick'] == 1:
critics_pick_count = critics_pick_count + 1
picks_new_count = critics_pick_count
picks_review_list.append(picks_new_count)
print("Out of the last", last_reviews + value, "movie reviews,", sum(picks_review_list), "were Critics' picks.")
#STEP 1: EXPLORING THE DATA STRUCTURE (without the loop)
#critics_response = requests.get('http://api.nytimes.com/svc/movies/v2/reviews/search.json?&offset=0&api-key=0c3ba2a8848c44eea6a3443a17e57448')
#critics_data = critics_response.json()
#print(critics_data.keys())
#print(critics_data['num_results'])
#print(critics_data['results'])
#print(type(critics_data['results']))
#print(critics_data['results'][0].keys())
#STEP 2: CREATE A LOOP, THAT GOES THROUGH THE SEARCH RESULTS FOR EACH OFFSET VALUE AND STORES THE RESULTS IN THE SAME LIST
#(That list is then passed on to step 3)
critics_offset_value = [0, 20]
critics_list = [ ]
for value in critics_offset_value:
critics_response = requests.get('http://api.nytimes.com/svc/movies/v2/reviews/search.json?&offset=' + str(value) + '&api-key=0c3ba2a8848c44eea6a3443a17e57448')
critics_data = critics_response.json()
critics = critics_data['results']
for review in critics:
critics_list.append(review['byline'])
#print(critics_list)
unique_critics = set(critics_list)
#print(unique_critics)
#STEP 3: FOR EVERY NAME IN THE UNIQUE CRITICS LIST, LOOP THROUGH NON-UNIQUE LIST TO COUNT HOW OFTEN THEY OCCUR
#STEP 4: SELECT THE ONE THAT HAS WRITTEN THE MOST (from the #print statement below, I know it's two people with same score)
max_count = 0
for name in unique_critics:
name_count = 0
for critic in critics_list:
if critic == name:
name_count = name_count + 1
if name_count > max_count:
max_count = name_count
max_name = name
if name_count == max_count:
same_count = name_count
same_name = name
#print(name, "has written", name_count, "reviews out of the last 40 reviews.")
print(max_name, "has written the most of the last 40 reviews:", max_count)
print(same_name, "has written the most of the last 40 reviews:", same_count)
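# An equivalent, more concise approach (an illustrative sketch, not part of the
# original homework): collections.Counter already tracks the review counts.
from collections import Counter
critic_counts = Counter(critics_list)
print(critic_counts.most_common(2))  # the two critics tied for the most reviews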
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Leaky Relu
Step2: BCE
Step3: GENERATOR AND DISCRIMINATOR FUNCTIONS
Step4: Model
Step5: Optimizer
Step6: Sample Generator
Step7: Aux functions
Step8: Load the DATA
Step9: Training Part
|
<ASSISTANT_TASK:>
Python Code:
def batchnormalization(X, eps=1e-8, W=None, b=None):
if X.get_shape().ndims == 4:
mean = tf.reduce_mean(X, [0,1,2])
standar_desviation = tf.reduce_mean(tf.square(X-mean), [0,1,2])
X = (X - mean) / tf.sqrt(standar_desviation + eps)
if W is not None and b is not None:
W = tf.reshape(W, [1,1,1,-1])
b = tf.reshape(b, [1,1,1,-1])
X = X*W + b
elif X.get_shape().ndims == 2:
mean = tf.reduce_mean(X, 0)
standar_desviation = tf.reduce_mean(tf.square(X-mean), 0)
X = (X - mean) / tf.sqrt(standar_desviation + eps)
if W is not None and b is not None:
W = tf.reshape(W, [1,-1])
b = tf.reshape(b, [1,-1])
X = X*W + b
return X
def leakyRelu(X, alpha=0.2):
return tf.maximum(X,tf.multiply(X, alpha))
def bce(x, z):
x = tf.clip_by_value(x, 1e-7, 1. - 1e-7)
return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = x, labels = z))
def MultilayerPerceptronGenerator(Z, Y, batch_size):
kernel_W1 = [int(Z.get_shape()[1] + Y.get_shape()[1]), dim_W1]
kernel_W2 = [dim_W1 + int(Y.get_shape()[1]), dim_W2*7*7]
kernel_W3 = [5, 5, dim_W3, dim_W2 + int(Y.get_shape()[1])]
kernel_W4 = [5, 5, dim_channel, dim_W3 + int(Y.get_shape()[1])]
gen_W1 = tf.get_variable("gen_W1", kernel_W1, initializer=tf.random_normal_initializer(stddev=0.02))
gen_W2 = tf.get_variable("gen_W2", kernel_W2, initializer=tf.random_normal_initializer(stddev=0.02))
gen_W3 = tf.get_variable("gen_W3", kernel_W3, initializer=tf.random_normal_initializer(stddev=0.02))
gen_W4 = tf.get_variable("gen_W4", kernel_W4, initializer=tf.random_normal_initializer(stddev=0.02))
yb = tf.reshape(Y, [batch_size, 1, 1, int(Y.get_shape()[1])])
Z = tf.concat([Z, Y], axis=1)
op1 = tf.nn.relu(batchnormalization(tf.matmul(Z, gen_W1)))
op1 = tf.concat([op1, Y], axis=1)
op2 = tf.nn.relu(batchnormalization(tf.matmul(op1, gen_W2)))
op2 = tf.reshape(op2, [batch_size, 7, 7, dim_W2])
op2 = tf.concat([op2, yb*tf.ones([batch_size, 7, 7, int(Y.get_shape()[1])])], axis = 3)
op3 = tf.nn.conv2d_transpose(op2, gen_W3, output_shape=[batch_size, 14, 14, dim_W3], strides=[1,2,2,1])
op3 = tf.nn.relu(batchnormalization(op3))
op3 = tf.concat([op3, yb*tf.ones([batch_size, 14, 14, Y.get_shape()[1]])], axis = 3)
op4 = tf.nn.conv2d_transpose(op3, gen_W4, output_shape=[batch_size, 28, 28, dim_channel], strides=[1,2,2,1])
return op4
def MultilayerPerceptronDiscriminator(image, Y, batch_size):
kernel_W1 = [5, 5, dim_channel + int(dim_y), dim_W3]
kernel_W2 = [5, 5, dim_W3 + int(dim_y), dim_W2]
kernel_W3 = [dim_W2*7*7 + int(dim_y), dim_W1]
kernel_W4 = [dim_W1 + int(dim_y), 1]
dis_W1 = tf.get_variable("dis_W1", kernel_W1, initializer=tf.random_normal_initializer(stddev=0.02))
dis_W2 = tf.get_variable("dis_W2", kernel_W2, initializer=tf.random_normal_initializer(stddev=0.02))
dis_W3 = tf.get_variable("dis_W3", kernel_W3, initializer=tf.random_normal_initializer(stddev=0.02))
dis_W4 = tf.get_variable("dis_W4", kernel_W4, initializer=tf.random_normal_initializer(stddev=0.02))
yb = tf.reshape(Y, tf.stack([batch_size, 1, 1, int(Y.get_shape()[1])]))
X = tf.concat([image, yb*tf.ones([batch_size, 28, 28, int(Y.get_shape()[1])])], axis = 3)
op1 = leakyRelu( tf.nn.conv2d( X, dis_W1, strides=[1, 2, 2, 1], padding='SAME'))
op1 = tf.concat([op1, yb*tf.ones([batch_size, 14, 14, int(Y.get_shape()[1])])], axis = 3)
op2 = leakyRelu( tf.nn.conv2d( op1, dis_W2, strides=[1, 2, 2, 1], padding='SAME'))
op2 = tf.reshape(op2, [batch_size, -1])
op2 = tf.concat([op2, Y], axis = 1)
op3 = leakyRelu(batchnormalization(tf.matmul(op2, dis_W3)))
op3 = tf.concat([op3, Y], axis = 1)
p = tf.nn.sigmoid(tf.matmul(op3, dis_W4))
return p, op3
def createModel(batch_size):
Z = tf.placeholder(tf.float32, [batch_size, dim_z])
Y = tf.placeholder(tf.float32, [batch_size, dim_y])
image_real = tf.placeholder(tf.float32, [batch_size] + image_shape)
op4_generated = MultilayerPerceptronGenerator(Z,Y, batch_size)
image_generate = tf.nn.sigmoid(op4_generated)
with tf.variable_scope("discriminator_variables") as scope:
p_real, raw_real = MultilayerPerceptronDiscriminator(image_real, Y, batch_size)
scope.reuse_variables()
p_gen, raw_gen = MultilayerPerceptronDiscriminator(image_generate, Y, batch_size)
dis_cost_real = bce(raw_real, tf.ones_like(raw_real))
dis_cost_gen = bce(raw_gen, tf.zeros_like(raw_gen))
dis_cost = dis_cost_real + dis_cost_gen
gen_cost = bce (raw_gen, tf.ones_like(raw_gen))
return Z, Y, image_real, dis_cost, gen_cost, p_real, p_gen
def optimizer_function(d_cost_tf, g_cost_tf, dis_vars, gen_vars):
train_op_dis = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(d_cost_tf, var_list=dis_vars)
train_op_gen = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(g_cost_tf, var_list=gen_vars)
return train_op_dis, train_op_gen
def sample_creator(dimension):
Z = tf.placeholder(tf.float32, [dimension, dim_z])
Y = tf.placeholder(tf.float32, [dimension, dim_y])
op4 = MultilayerPerceptronGenerator(Z,Y,dimension)
image = tf.nn.sigmoid(op4)
return Z,Y,image
def OneHot(X, n=None, negative_class=0.):
X = np.asarray(X).flatten()
if n is None:
n = np.max(X) + 1
Xoh = np.ones((len(X), n)) * negative_class
Xoh[np.arange(len(X)), X] = 1.
return Xoh
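# A quick illustrative usage check (not in the original notebook): OneHot turns
# integer labels into the rows of a one-hot matrix.
print(OneHot(np.array([0, 2, 1]), 3))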
def save_visualization(X, nh_nw, save_path='tmp/sample.jpg'):
h,w = X.shape[1], X.shape[2]
img = np.zeros((h * nh_nw[0], w * nh_nw[1], 3))
for n,x in enumerate(X):
j = n // nh_nw[1]
i = n % nh_nw[1]
img[j*h:j*h+h, i*w:i*w+w, :] = x
scipy.misc.imsave(save_path, img)
sys.path.append('..')
data_dir = 'data/'
def mnist():
fd = open(os.path.join(data_dir,'train-images.idx3-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
trX = loaded[16:].reshape((60000,28*28)).astype(float)
fd = open(os.path.join(data_dir,'train-labels.idx1-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
trY = loaded[8:].reshape((60000))
fd = open(os.path.join(data_dir,'t10k-images.idx3-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
teX = loaded[16:].reshape((10000,28*28)).astype(float)
fd = open(os.path.join(data_dir,'t10k-labels.idx1-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
teY = loaded[8:].reshape((10000))
trY = np.asarray(trY)
teY = np.asarray(teY)
return trX, teX, trY, teY
def mnist_with_valid_set():
trX, teX, trY, teY = mnist()
train_inds = np.arange(len(trX))
np.random.shuffle(train_inds)
trX = trX[train_inds]
trY = trY[train_inds]
vaX = trX[50000:]
vaY = trY[50000:]
trX = trX[:50000]
trY = trY[:50000]
return trX, vaX, teX, trY, vaY, teY
train_data, validation_data, test_data, train_label, validation_label, test_label = mnist_with_valid_set()
print("Train set of : " + str(train_data.shape))
print("Train label of : " + str(train_label.shape))
print("Test set of : " + str(test_data.shape))
print("Test label of : " + str(test_label.shape))
print("Validation set of : " + str(validation_data.shape))
print("Validation label of : " + str(validation_label.shape))
n_epochs = 100
learning_rate = 0.0002
batch_size = 128
image_shape = [28,28,1]
dim_z = 100
dim_W1 = 1024
dim_W2 = 128
dim_W3 = 64
dim_channel = 1
dim_y = 10
visualize_dimension=196
with tf.variable_scope("training_part") as scope:
Z_tf, Y_tf, image_tf, d_cost_tf, g_cost_tf, p_real, p_gen = createModel(batch_size)
session = tf.InteractiveSession()
saver = tf.train.Saver(max_to_keep=10)
scope.reuse_variables()
Z_sample, Y_sample, image_sample = sample_creator(visualize_dimension)
dis_vars = filter(lambda x: x.name.startswith(scope.name+'/dis'), tf.global_variables())
gen_vars = filter(lambda x: x.name.startswith(scope.name+'/gen'), tf.global_variables())
dis_vars = [i for i in dis_vars]
gen_vars = [i for i in gen_vars]
train_op_dis, train_op_gen = optimizer_function(d_cost_tf, g_cost_tf, dis_vars, gen_vars)
tf.global_variables_initializer().run()
Z_np_sample = np.random.uniform(-1, 1, size=(visualize_dimension, dim_z))
Y_np_sample = OneHot(np.random.randint(10, size=[visualize_dimension]))
iterations = 0
k = 2
#Information variables of the training process
sample_creation = 200 #Iteration where a sample is going to be created
show_information = 25 #Iteration where the information is going to be showed
print("Starting the training process")
for epoch in range(n_epochs):
index = np.arange(len(train_label))
np.random.shuffle(index)
train_data = train_data[index]
train_label = train_label[index]
for start, end in zip(
range(0, len(train_label), batch_size),
range(batch_size, len(train_label), batch_size)
):
Xs = train_data[start:end].reshape( [-1, 28, 28, 1]) / 255.
Ys = OneHot(train_label[start:end])
Zs = np.random.uniform(-1, 1, size=[batch_size, dim_z]).astype(np.float32)
if np.mod( iterations, k ) != 0:
_, gen_loss_val = session.run([train_op_gen, g_cost_tf],feed_dict={Z_tf:Zs,Y_tf:Ys})
discrim_loss_val, p_real_val, p_gen_val = session.run([d_cost_tf,p_real,p_gen],feed_dict={Z_tf:Zs, image_tf:Xs, Y_tf:Ys})
else:
_, discrim_loss_val = session.run([train_op_dis, d_cost_tf],feed_dict={Z_tf:Zs,Y_tf:Ys,image_tf:Xs})
gen_loss_val, p_real_val, p_gen_val = session.run([g_cost_tf, p_real, p_gen],feed_dict={Z_tf:Zs, image_tf:Xs, Y_tf:Ys})
if np.mod(iterations, show_information) == 0:
print("========== Showing information =========")
print("iteration:", iterations)
print("gen loss:", gen_loss_val)
print("discrim loss:", discrim_loss_val)
print("Average P(real)=", p_real_val.mean())
print("Average P(gen)=", p_gen_val.mean())
if np.mod(iterations, sample_creation) == 0:
generated_sample = session.run(image_sample,feed_dict={Z_sample:Z_np_sample,Y_sample:Y_np_sample})
generated_samples = (generated_sample + 1.)/2.
save_visualization(generated_samples, (14,14), save_path='image/sample_%04d.jpg' % int(iterations/sample_creation))
iterations += 1
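    # Hypothetical addition (not in the original notebook): checkpoint the model at
    # the end of each epoch with the saver created above. The relative path is an
    # assumption and the target directory must already exist.
    saver.save(session, 'image/dcgan_mnist.ckpt', global_step=epoch)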
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the dataset that will be used
Step2: Let us drop the missing and duplicated values since they don't matter for now (already covered in other notebooks)
Step3: Dealing with outliers
Step4: Create the mask
Step5: Check if some outliers were caught
Step6: Yes! Two were found! The mask_age variable contains the rows we want to keep, i.e., the rows that meet the bounds above. So, let's drop the above 2 rows
Step7: 2) Create classes/bins
Step8: Discretize!
Step9: The limits of the defined classes/bins are
Step10: We could replace the height values with the new five categories. Nevertheless, it looks like a person with 252 cm is actually an outlier, and it would be best to evaluate this value against two standard deviations or a percentile (e.g., 99%).
Step11: Which ones are out of the bounds?
Step12: Let's delete these rows (mask_height contains the rows we want to keep)
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
% matplotlib inline
from matplotlib import pyplot as plt
data = pd.read_csv('../data/data_with_problems.csv', index_col=0)
print('Our dataset has %d columns (features) and %d rows (people).' % (data.shape[1], data.shape[0]))
data.head(15)
data = data.drop_duplicates()
data = data.dropna()
print('Our dataset has %d columns (features) and %d rows (people).' % (data.shape[1], data.shape[0]))
min_age = 0
max_age = 117 # oldest person currently alive
mask_age = (data['age'] >= min_age) & (data['age'] <= max_age)
mask_age.head(10)
data[~mask_age]
data = data[mask_age]
print('Our dataset has %d columns (features) and %d rows (people).' % (data.shape[1], data.shape[0]))
data['height'].hist(bins=100)
plt.title('Height population distribution')
plt.xlabel('cm')
plt.ylabel('freq')
plt.show()
height_bins = pd.qcut(data['height'],
5,
labels=['very short', 'short', 'average', 'tall', 'very tall'],
retbins=True)
height_bins[0].head(10)
height_bins[1]
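# An illustrative follow-up (an assumption about intent, not in the original
# notebook): the categorical labels returned by pd.qcut could be stored alongside
# the raw heights as a new column.
data['height_class'] = height_bins[0]
data[['height', 'height_class']].head()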
# Calculate the mean and standard deviation
std_height = data['height'].std()
mean_height = data['height'].mean()
# The mask!
mask_height = (data['height'] > mean_height-2*std_height) & (data['height'] < mean_height+2*std_height)
print('Height bounds:')
print('> Minimum accepted height: %3.1f' % (mean_height-2*std_height))
print('> Maximum accepted height: %3.1f' % (mean_height+2*std_height))
data.loc[~mask_height]
data = data[mask_height]
print('Our dataset has %d columns (features) and %d rows (people).' % (data.shape[1], data.shape[0]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Set Configuration
Step2: 3. Enter DV360 Bulk Editor Recipe Parameters
Step3: 4. Execute DV360 Bulk Editor
|
<ASSISTANT_TASK:>
Python Code:
!pip install git+https://github.com/google/starthinker
from starthinker.util.configuration import Configuration
CONFIG = Configuration(
project="",
client={},
service={},
user="/content/user.json",
verbose=True
)
FIELDS = {
'auth_dv':'user', # Credentials used for dv.
'auth_sheet':'user', # Credentials used for sheet.
'auth_bigquery':'service', # Credentials used for bigquery.
'recipe_name':'', # Name of Google Sheet to create.
'recipe_slug':'', # Name of Google BigQuery dataset to create.
'command':'Load Partners', # Action to take.
}
print("Parameters Set To: %s" % FIELDS)
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields
TASKS = [
{
'dataset':{
'__comment__':'Ensure dataset exists.',
'auth':{'field':{'name':'auth_bigquery','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},
'dataset':{'field':{'name':'recipe_slug','prefix':'DV_Editor_','kind':'string','order':2,'default':'','description':'Name of Google BigQuery dataset to create.'}}
}
},
{
'drive':{
'__comment__':'Copy the default template to sheet with the recipe name',
'auth':{'field':{'name':'auth_sheet','kind':'authentication','order':1,'default':'user','description':'Credentials used for reading data.'}},
'copy':{
'source':'https://docs.google.com/spreadsheets/d/18G6cGo4j5SsY08H8P53R22D_Pm6m-zkE6APd3EDLf2c/',
'destination':{'field':{'name':'recipe_name','prefix':'DV Editor ','kind':'string','order':3,'default':'','description':'Name of Google Sheet to create.'}}
}
}
},
{
'dv_editor':{
'__comment':'Depending on users choice, execute a different part of the solution.',
'auth_dv':{'field':{'name':'auth_dv','kind':'authentication','order':1,'default':'user','description':'Credentials used for dv.'}},
'auth_sheets':{'field':{'name':'auth_sheet','kind':'authentication','order':2,'default':'user','description':'Credentials used for sheet.'}},
'auth_bigquery':{'field':{'name':'auth_bigquery','kind':'authentication','order':3,'default':'service','description':'Credentials used for bigquery.'}},
'sheet':{'field':{'name':'recipe_name','prefix':'DV Editor ','kind':'string','order':4,'default':'','description':'Name of Google Sheet to create.'}},
'dataset':{'field':{'name':'recipe_slug','prefix':'DV_Editor_','kind':'string','order':5,'default':'','description':'Name of Google BigQuery dataset to create.'}},
'command':{'field':{'name':'command','kind':'choice','choices':['Clear Partners','Clear Advertisers','Clear Campaigns','Clear Insertion Orders And Line Items','Clear Preview','Clear Update','Load Partners','Load Advertisers','Load Campaigns','Load Insertion Orders And Line Items','Preview','Update'],'order':6,'default':'Load Partners','description':'Action to take.'}}
}
}
]
json_set_fields(TASKS, FIELDS)
execute(CONFIG, TASKS, force=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Interact basics
Step3: Use the interact function to interact with the print_sum function.
Step5: Write a function named print_string that prints a string and additionally prints the length of that string if a boolean parameter is True.
Step6: Use the interact function to interact with the print_string function.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
from IPython.html import widgets
def print_sum(a, b):
    """Print the sum of the arguments a and b."""
    print(a + b)
interact(print_sum, a=(widgets.FloatSlider(min=-10.,max=10.,step=0.1,value=0)), b=(widgets.IntSlider(min=-8,max=8,step=2,value=0)));
assert True # leave this for grading the print_sum exercise
def print_string(s, length=False):
    """Print the string s and optionally its length."""
print(s)
if length==True:
print(len(s))
interact(print_string, s="Hello", length=False);
assert True # leave this for grading the print_string exercise
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Let's extract the zipcode and precipitation data from the California Department of Water Resources
Step4: Crop value extract from
|
<ASSISTANT_TASK:>
Python Code:
for i in range(9):
print county_name[i]
zipcode=[93210,93263,93202,93638,93620,95641,95242,95326,93201]
ZipcodeList=[{ "County_N":county_name[i], "zipcode":zipcode[i] } for i in range(len(zipcode))]
COUNTYZIP=pd.DataFrame(ZipcodeList, columns=["County_N", "zipcode"])
COUNTYZIP
start="2010-01-01"
def ndb_search(term,start,end,verbose = False):
"""This takes all of the necessary parameters to form a query
Input: key (data.gov API key, string), term (type, string)
Output: JSON object"""
url = "http://et.water.ca.gov/api/data"
response = requests.get(url, params = {
"targets": term,
"appKey":"90e36c84-3f23-48a3-becd-1865076a04fd",
"startDate":start,
"EndDate":end,
"dataItems": "day-precip"
})
response.raise_for_status() # check for errors
if verbose:
print response.url
return response.json() # parse JSON
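# The notebook never shows the call that creates `Tulare2010` before it is used below.
# A plausible reconstruction is sketched here; the zipcode and end date are assumptions,
# not values taken from the source.
end = "2010-12-31"
Tulare2010 = ndb_search(93201, start, end)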
Tulare2010_Recode=Tulare2010["Data"]["Providers"][0]['Records']
len(Tulare2010_Recode)
# note: within a county there may be multiple stations that record the data
# we take the mean, then multiply by 365 to get one year of rain
# note the value is inches
precip=[ Tulare2010_Recode[i]['DayPrecip']['Value'] for i in range(len(Tulare2010_Recode))]
precip2=np.array(precip).astype(np.float)
#precip2
#WRITE INTO FUNCTIONS
def precip_cal(term,year,verbose = False):
"""This takes a zipcode and year and gives the precipitation for that year
Input: term (zipcode, int), year (year, int)
Output: precipitation of a year and a certain county"""
start="{}-01-01".format("".join(str(year)))
end="{}-12-31".format("".join(str(year)))
Tulare2010=ndb_search(term,start,end,verbose = False)
Tulare2010_Recode=Tulare2010["Data"]["Providers"][0]['Records']
precip=[ Tulare2010_Recode[i]['DayPrecip']['Value'] for i in range(len(Tulare2010_Recode))]
precip2=np.array(precip).astype(np.float)
return np.nanmean(precip2)*365 # parse JSON
year=[2010,2011,2012,2013,2014,2015]
ZipcodeList=[{ "County_N":county_name[i], "zipcode":zipcode[i],"year":year[j]} for i in range(len(zipcode)) for j in range(6) ]
ZipcodeList
COUNTYYear=pd.DataFrame(ZipcodeList, columns=["County_N", "zipcode","year"])
x=[precip_cal(COUNTYYear["zipcode"][i],COUNTYYear["year"][i]) for i in xrange(54) ]
COUNTYYear=pd.DataFrame(ZipcodeList, columns=["County_N", "zipcode","year"])
COUNTYYear["Precip"]=x
COUNTYYear
COUNTYYear
# unit for precip is inch
newtable=pd.merge(KAWEAH, COUNTYYear,how="right")
f = {'Acres':['sum'], 'WaterUsage':['mean'], 'UsageTotal':['sum'], 'count':['sum'],"Precip":['mean']}
grouped_data=newtable.groupby(['Subbasin_N', 'County_N', 'Year', 'CropName']).agg(f)
cropname=np.unique(KAWEAH["CropName"])
cropname
for i in range(len(cropname)):
print cropname[i]
len(cropname)
def avg(l):
return sum(l, 0.0) / len(l)
avg([1*3,2*5])
1628*140.00
avg([ 8.88*466 ,5.73*682 ,2.48*3390 ,19.00*391,8.33*780,14.10*429 ,5.30*664 , 1.76 *3710,1750*2.06 ])
# data from price value in 2013
# econ value is dollar per acers
Econ_dict = { "Al Pist":2360*3.21,
"Alfalfa":7.0*206.00,
"Corn": 26.50*48.23,
"Cotton":1628*140.00,
"Cucurb":avg([260*20.20, 180*35.40, 200*25.90,580*13.00,300*16.00,330*15.60]),
#Honeydew Melons 260 2,730,000 20.20 Cwt. Cwt. $/Cwt.
#"Squash" 180 1,224,000 35.40 Cwt. Cwt. $/Cwt.
#"Cucumbers" 200 760,000 25.90 Cwt. Cwt. $/Cwt.
#"Watermelons" 580 5,800,000 13.00 Cwt. Cwt. $/Cwt.
#"Cantaloupes" 300 12,750,000 16.00 Cwt. Cwt. $/Cwt.
#"Pumpkins 330 1,947,000 15.60 Cwt. Cwt. $/Cwt.
"DryBean": 2320*56.80,
"Grain":5.35*190.36,
"On Gar":avg([ 400*13.20,165*60.30 ]),
#"Onions" spring 400 2,720,000 13.20 summer 490 3,822,000 6.40 Onions, Summer Storage 399 11,700,000 9.11
# "Garlic" 165 3,795,000 60.30
"Oth Dec":avg([ 8.88*466 ,5.73*682 ,2.48*3390 ,19.00*391,8.33*780,14.10*429 ,5.30*664 , 1.76 *3710,1750*2.06 ]),
#"Apples" 8.88 135,000 466 Tons Tons $/Ton
#"Apricots" 5.73 54,400 682 Tons Tons $/Ton
#"Cherries", 2.48 82,000 3,390 Tons Tons $/Ton
#"Pears", 19.00 220,000 391 Tons Tons $/Ton
#"Nectarines" 8.33 150,000 780 Tons Tons $/Ton
#"Peaches", 14.10 648,000 429 Tons Tons $/Ton
#"Plums", 5.30 95,400 664 Tons Tons $/Ton
#"Walnuts" 1.76 492,000 3,710 #tones Tons $/Ton
#"Pecans" 1,750 5,000 2.06 Pounds 1000pounds $/Pound
"Oth Fld":avg([1296.00* 27.1, 17.00*37.56]),
# sunflowers 1,296.00 751,500 27.1 Tons Tons $/Ton
# Sorghum2009 17.00 646,000 37.56 Tons Tons $/Ton
"Oth Trk":avg([320*29.60, 350*24.90, 32*152.00, 180*42.70, 107*248.00,425*41.70,385* 38.70 ,165*42.10,405*21.70 ]),
#"Carrots" 320 20,000,000 29.60 Cwt. Cwt. $/Cwt.
#"Lettuce" 350 33,600,000 24.90 Cwt. Cwt. $/Cwt.
#"Asparagus" 32 368,000 152.00 Cwt. Cwt. $/Cwt.
#"Cauliflower" 180 5,868,000 42.70 Cwt. Cwt. $/Cwt.
# berries 107 514,000 248.00 Cwt. Cwt. $/Cwt.
# "Peppers Bell", 425 8,465,000 41.70 Cwt. Cwt. $/Cwt.
# pepers Chile 385 2,640,000 38.70 Cwt. Cwt. $/Cwt.
# "Broccoli", 165 20,460,000 42.10 8 Cwt. Cwt. $/Cwt.
# "Cabbage", 405 5,670,000 21.70 Cwt. Cwt. $/Cwt.
"Pasture":0,
"Potato":425*17.1, # Cwt. Cwt. $/Cwt.
"Pro Tom":300*36.20, # Cwt. Cwt. $/Cwt
"Rice":84.80*20.9, # Cwt. Cwt. $/Cwt
"Safflwr": 2000.00*26.5, # Pounds Cwt. $/Cwt.
"SgrBeet": 43.40*52.1, # Tons Tons $/Ton
"Subtrop":avg([622*6.52,4.15*813 ]),
# orange 622 109000000 6.52
# Olives 4.15 166000 813 Tons Tons $/Ton
"Vine":900*5.07}# Cartons 3/ Cartons $/Carton
Econ_dict
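# A possible use of Econ_dict (added sketch, not in the source): map each crop's
# per-acre value onto the merged table to estimate an economic value per record.
# Assumes `newtable` carries the 'CropName' and 'Acres' columns used in the groupby above.
newtable["EconValue"] = newtable["CropName"].map(Econ_dict) * newtable["Acres"]
newtable.head()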
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the next section we illustrate the range transformer with the Diet Problem, from DOcplex distributed examples.
Step2: Creating a Spark session
Step3: Using the transformer with a Spark dataframe
Step4: Then we extract the two vectors of min/max for each nutrient. Each vector has nb_nutrients elements.
Step5: We are now ready to prepare the transformer matrix. This matrix has shape (7, 11) as we have one row per nutrient (7 nutrients) and one column per food (9 foods) plus the 'min' and 'max' columns, giving 11 columns in total.
Step6: Populate a Spark dataframe with the matrix data
Step7: Let's display the dataframe schema and content
Step8: Solving the Diet problem with the $CplexRangeTransformer$ in a Pipeline
Step9: Example with CplexTransformer
|
<ASSISTANT_TASK:>
Python Code:
try:
import numpy as np
except ImportError:
raise RuntimeError('This notebook requires numpy')
# the baseline diet data as Python lists of tuples.
FOODS = [
("Roasted Chicken", 0.84, 0, 10),
("Spaghetti W/ Sauce", 0.78, 0, 10),
("Tomato,Red,Ripe,Raw", 0.27, 0, 10),
("Apple,Raw,W/Skin", .24, 0, 10),
("Grapes", 0.32, 0, 10),
("Chocolate Chip Cookies", 0.03, 0, 10),
("Lowfat Milk", 0.23, 0, 10),
("Raisin Brn", 0.34, 0, 10),
("Hotdog", 0.31, 0, 10)
]
NUTRIENTS = [
("Calories", 2000, 2500),
("Calcium", 800, 1600),
("Iron", 10, 30),
("Vit_A", 5000, 50000),
("Dietary_Fiber", 25, 100),
("Carbohydrates", 0, 300),
("Protein", 50, 100)
]
FOOD_NUTRIENTS = [
("Roasted Chicken", 277.4, 21.9, 1.8, 77.4, 0.0, 0.0, 42.2),
("Spaghetti W/ Sauce", 358.2, 80.2, 2.3, 3055.2, 11.6, 58.3, 8.2),
("Tomato,Red,Ripe,Raw", 25.8, 6.2, 0.6, 766.3, 1.4, 5.7, 1.0),
("Apple,Raw,W/Skin", 81.4, 9.7, 0.2, 73.1, 3.7, 21.0, 0.3),
("Grapes", 15.1, 3.4, 0.1, 24.0, 0.2, 4.1, 0.2),
("Chocolate Chip Cookies", 78.1, 6.2, 0.4, 101.8, 0.0, 9.3, 0.9),
("Lowfat Milk", 121.2, 296.7, 0.1, 500.2, 0.0, 11.7, 8.1),
("Raisin Brn", 115.1, 12.9, 16.8, 1250.2, 4.0, 27.9, 4.0),
("Hotdog", 242.1, 23.5, 2.3, 0.0, 0.0, 18.0, 10.4)
]
nb_foods = len(FOODS)
nb_nutrients = len(NUTRIENTS)
print('#foods={0}'.format(nb_foods))
print('#nutrients={0}'.format(nb_nutrients))
assert nb_foods == len(FOOD_NUTRIENTS)
try:
import findspark
findspark.init()
except ImportError:
# Ignore exception: the 'findspark' module is required when executing Spark in a Windows environment
pass
import pyspark # Only run after findspark.init() (if running in a Windows environment)
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
mat_fn = np.matrix([FOOD_NUTRIENTS[f][1:] for f in range(nb_foods)])
print('The food-nutrient matrix has shape: {0}'.format(mat_fn.shape))
nutrient_mins = [NUTRIENTS[n][1] for n in range(nb_nutrients)]
nutrient_maxs = [NUTRIENTS[n][2] for n in range(nb_nutrients)]
food_names ,food_costs, food_mins, food_maxs = map(list, zip(*FOODS))
# step 1. add two lines for nutrient mins, maxs
nf2 = np.append(mat_fn, np.matrix([nutrient_mins, nutrient_maxs]), axis=0)
mat_nf = nf2.transpose()
mat_nf.shape
from pyspark.sql import SQLContext
sc = spark.sparkContext
sqlContext = SQLContext(sc)
columns = food_names + ['min', 'max']
food_nutrients_df = sqlContext.createDataFrame(mat_nf.tolist(), columns)
food_nutrients_df.printSchema()
food_nutrients_df.show()
from docplex.mp.sparktrans.transformers import CplexRangeTransformer
from pyspark.ml import Pipeline
from pyspark.sql.functions import *
# Create the optimization transformer to calculate the optimal quantity for each food for a balanced diet.
cplexSolve = CplexRangeTransformer(minCol='min', maxCol='max', ubs=food_maxs)
# Make evaluation on input data. Additional parameters are specified using the 'params' dictionary
diet_df = cplexSolve.transform(food_nutrients_df, params={cplexSolve.y: food_costs, cplexSolve.sense: 'min'})
diet_df.orderBy(desc("value")).show()
food_nutrients_LP_df = food_nutrients_df.select([item for item in food_nutrients_df.columns if item not in ['min']])
food_nutrients_LP_df.show()
from docplex.mp.sparktrans.transformers import CplexTransformer
# Create the optimization transformer to calculate the optimal quantity for each food for a balanced diet.
# Here, let's use the CplexTransformer by specifying only a maximum amount for each nutrient.
cplexSolve = CplexTransformer(rhsCol='max', ubs=food_maxs)
# Make evaluation on input data. Additional parameters are specified using the 'params' dictionary
# Since there is no lower range for decision variables, let's maximize cost instead! (otherwise, the result is all 0's)
diet_max_cost_df = cplexSolve.transform(food_nutrients_LP_df, params={cplexSolve.y: food_costs, cplexSolve.sense: 'max'})
diet_max_cost_df.orderBy(desc("value")).show()
%matplotlib inline
import matplotlib.pyplot as plt
def plot_radar_chart(labels, stats, **kwargs):
angles=np.linspace(0, 2*np.pi, len(labels), endpoint=False)
# close the plot
stats = np.concatenate((stats, [stats[0]]))
angles = np.concatenate((angles, [angles[0]]))
fig = plt.figure()
ax = fig.add_subplot(111, polar=True)
ax.plot(angles, stats, 'o-', linewidth=2, **kwargs)
ax.fill(angles, stats, alpha=0.30, **kwargs)
ax.set_thetagrids(angles * 180/np.pi, labels)
#ax.set_title([df.loc[386,"Name"]])
ax.grid(True)
diet = diet_df.toPandas()
plot_radar_chart(labels=diet['name'], stats=diet['value'], color='r')
diet_max_cost = diet_max_cost_df.toPandas()
plot_radar_chart(labels=diet_max_cost['name'], stats=diet_max_cost['value'], color='r')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Load data
Step2: 2.2 SST
Step3: 2.3 Preprocess
Step4: 3. Carry out Maximum Covariance Analysis
Step5: 3.2 Postprocess
Step6: 3.2.2 Extract the leading SLP MCA pattern and EC
Step7: 3.2.3 Extract the leading SST MCA pattern and EC
Step8: 4 Visualize MCA results
Step9: 4.2 Plot the leading SLP/SST MCA spatial pattern and EC
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import xarray as xr
import cartopy.crs as ccrs
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
mpl.rcParams['figure.figsize'] = 8.0, 4.0
mpl.rcParams['font.size'] = 13
ds1 = xr.open_dataset('data/slp.mnmean.hadslp2.nc')
slp = ds1.slp.sel(lat=slice(30, -30), lon=slice(180, 290), time=slice('1950-01-01','2005-12-31'))
lon_slp = ds1.lon.sel(lon=slice(180, 290))
lat_slp = ds1.lat.sel(lat=slice(30, -30))
dates = ds1.time.sel(time=slice('1950-01-01','2005-12-31')).values
# climatology
slp_clm = slp.groupby('time.month').mean(dim='time')
# anomaly
slp_anom = slp.groupby('time.month') - slp_clm
ds2 = xr.open_dataset('data/sst.mon.anom.kaplan.nc')
sst_anom = ds2.sst.sel(lat=slice(-30, 30), lon=slice(180, 290), time=slice('1950-01-01','2005-12-31'))
lat_sst = ds2.lat.sel(lat=slice(-30, 30))
lon_sst = ds2.lon.sel(lon=slice(180, 290))
slp2d = slp_anom.values
ntime, nrow_slp, ncol_slp = slp2d.shape
slp2d = np.reshape(slp2d, (ntime, nrow_slp*ncol_slp), order='F')
sst2d = sst_anom.values
ntime, nrow_sst, ncol_sst = sst2d.shape
sst2d = np.reshape(sst2d, (ntime, nrow_sst*ncol_sst), order='F')
nonMissingIndex = np.where(np.isnan(sst2d[0]) == False)[0]
sst2dNoMissing = sst2d[:, nonMissingIndex]
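# Background: with X the (time x space) SLP anomaly matrix and Y the SST anomalies,
# the cross-covariance matrix is C_xy = X^T Y / (n-1). Its SVD, C_xy = U S V^T, gives
# paired spatial patterns (columns of U and V); s_k^2 / sum(s^2) is the squared
# covariance fraction of mode k, and projecting the data onto a pattern gives its
# expansion coefficient time series.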
Cxy = np.dot(slp2d.T, sst2dNoMissing)/(ntime-1.0)
U, s, V = np.linalg.svd(Cxy, full_matrices=False)
V = V.T
scf = s**2./np.sum(s**2.0)
# SLP MCA pattern
U1 = np.reshape(U[:,0, None], (nrow_slp, ncol_slp), order='F')
# EC1 of SLP
a1 = np.dot(slp2d, U[:,0, np.newaxis])
# normalize
U1_norm = U1*np.std(a1)
a1_norm = a1/np.std(a1)
# SST MCA pattern
V1 = np.ones([nrow_sst*ncol_sst,1]) * np.NaN
V1 = V1.astype(V.dtype)
V1[nonMissingIndex,0] = V[:,0]
V1 = V1.reshape([nrow_sst,ncol_sst], order='F')
# EC1 of SST
b1 = np.dot(sst2dNoMissing, V[:,0, np.newaxis])
# normalize
V1_norm = V1*np.std(b1)
b1_norm = b1/np.std(b1)
plt.plot(np.cumsum(scf),'x')
plt.xlabel('SVD mode')
plt.ylabel('Cumulative squares covariance fraction')
plt.ylim([0.7,1.1])
plt.xlim([-0.5, 40])
gs = gridspec.GridSpec(2, 2)
gs.update(wspace=0.1, hspace=0.15)
fig = plt.figure(figsize = (14,10))
levels = np.arange(-1.0, 1.01, 0.05)
# SLP Pattern
ax0 = fig.add_subplot(gs[0,0], projection=ccrs.PlateCarree())
x1, y1 = np.meshgrid(lon_slp, lat_slp)
cs = ax0.contourf(x1, y1, U1_norm,
levels=levels,
transform=ccrs.PlateCarree(),
cmap='RdBu_r')
cb=fig.colorbar(cs, ax=ax0, shrink=0.8, aspect=20)
ax0.coastlines()
ax0.set_global()
ax0.set_extent([-180, -70, -19, 19])
ax0.set_title('Normalized SLP MCA Mode 1')
# SST Pattern
ax1 = fig.add_subplot(gs[0,1], projection=ccrs.PlateCarree())
x2, y2 = np.meshgrid(lon_sst, lat_sst)
cs2 = ax1.contourf(x2, y2, V1_norm,
levels=levels,
transform=ccrs.PlateCarree(),
cmap='RdBu_r')
cb=fig.colorbar(cs, ax=ax1, shrink=0.8, aspect=20)
ax1.coastlines()
ax1.set_global()
ax1.set_extent([-180, -70, -19, 19])
ax1.set_title('Normalized SST MCA Mode 1')
# EC1
ax2 = fig.add_subplot(gs[1,:])
ax2.plot(dates, a1_norm, label='SLP')
ax2.plot(dates, b1_norm, label='SST')
r = np.corrcoef(a1[:,0], b1[:,0])[0, 1]
ax2.set_title('Expansion Coefficients: SFC = '+ str(round(scf[0],2)) + ', R = ' + str(round(r,2)))
ax2.legend()
ax2.set_ylim([-4,4])
ax2.format_xdata = mdates.DateFormatter('%Y')
fig.autofmt_xdate()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The above bit of boiler-plate code is useful in a number of situations. Indeed, this is a pattern I regularly find myself using when writing scripts.
Step2: As for the main function itself, this ought to be at least somewhat easy to follow. We have a debug switch that allows us to determine where bombs are placed. We use this for testing.
Step3: Most of the code in the PlayGame function is concerned with parsing information from the user. When writing code, sometimes you have to make trade-offs; you can make things faster at the cost of memory, for example.
Step4: So the above code snippet is a smaller, simpler version of the code you will find in my minesweeper implementation. Here's the problem
Step5: So this code would more or less do the same job. And although it's easier to understand, it does have the drawback that for every additional command we add, we need to add several lines of code. Meanwhile, the solution I went for does away with all those nested if statements. In fact, adding an extra command requires just a single line of code (which we add to the COMMANDS dictionary).
Step6: By the way, in Python it is possible to save a function as a variable (with the idea of calling it later), which the code below hopefully illustrates
Step8: Combining all of these things means it is possible to write an argument parser in just a few lines of code. Moreover, adding new commands requires very little effort.
|
<ASSISTANT_TASK:>
Python Code:
## Assume that this code exists in a file named example.py
def main():
print(1 + 1)
if __name__ == "__main__":
main()
def main():
DEBUG = False #True
if DEBUG:
random.seed(243)
print("+--------------------------------+")
print("| WELCOME TO MINSWEEPER 1.0! |")
print("+--------------------------------+")
print("How to play: type 'commands' for a list of valid inputs. Then type 'help x' for information about how to use command 'x'")
print("")
game = PlayGame()
while game.is_playing:
s = input("Command: ")
game.parse_command(s)
display_board(game.player_board)
print("\n")
number = input("give me a number: ")
print("your selected number is: ", number)
def flag(x, y):
print(f"flag function was called. x = {x}, y = {y}")
def _help(topic=None):
if topic:
print(COMMANDS[topic][1])
def cheat():
print("cheating!")
## Command -> (function, help text)
COMMANDS = {
"flag": (flag, "Flags/deflags square(x,y). Example useage: flag x y"),
"help": (_help, "Selects square(x, y) to reveal, its game over if you reveal a bomb. Example useage: pick x y"),
"cheat": (cheat, "Shows the location of all bombs. Example useage: cheat") }
def parse_command(command):
instruction, *arguments = command.split(" ")
if instruction in COMMANDS:
return COMMANDS[instruction][0](*arguments)
else:
print("Parsing instruction failed")
# Example Calls:
command = "help cheat"
parse_command(command)
command2 = "flag 0 7"
parse_command(command2)
command3 = "cheat"
parse_command(command3)
def parse_command_if_version(command):
c = command.split(" ")
instruction = c[0]
args = c[1:]
if instruction == "help":
if len(args) == 0:
return _help()
if len(args) == 1:
topic = args[0]
return _help(topic)
if instruction == "cheat":
return cheat()
if instruction == "flag":
x = args[0]
y = args[1]
return flag(x, y)
# Example Calls:
command = "help cheat"
parse_command_if_version(command)
command2 = "flag 0 7"
parse_command_if_version(command2)
command3 = "cheat"
parse_command_if_version(command3)
def add(a, b):
return a + b
nums = [1, 2]
# add(nums) # this would fail
print(add(nums[0], nums[1]))
print(add(*nums))
def example(number):
return number
m = example # example is NOT called. m is merely a reference to a function.
n = example(20) # n calls example with the argument 20. The result is a number
print(n)
print(m(20)) # m(20) is the same as example(20)
def parse_command(command, command_dictionary):
"""command: str
command_dictionary: dict where the key is a command and the value is a function reference"""
instruction, *arguments = command.split(" ")
if instruction in command_dictionary:
return command_dictionary[instruction](*arguments)
else:
return f"ERROR: '{instruction}' is not a valid command"
math_dict = { "sqrt": lambda x: int(x)**0.5,
"round": lambda x, precision: round(float(x), int(precision)),
"neg": lambda x: -float(x) }
string_dict = { "toCaps": str.upper,
"reverse": lambda x: x[::-1],
"join": lambda *x: "".join(list(x))}
print("STRING_DICT EXAMPLES...")
print(parse_command("toCaps hello", string_dict))
print(parse_command("reverse dlrow", string_dict))
print(parse_command("join h e l l o _ w o r l d", string_dict))
print()
print("MATH_DICT EXAMPLES...")
print(parse_command("sqrt 2", math_dict))
print(parse_command("round 10.98 1", math_dict))
print(parse_command("neg -2", math_dict))
print(parse_command("missing a b c", math_dict))
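# Illustrating the point from the description (added example, not in the original):
# extending the parser with a new command is a single dictionary entry.
math_dict["square"] = lambda x: float(x) ** 2
print(parse_command("square 7", math_dict))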
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def NthCharacter(n ) :
s = ""
c = 1
while(True ) :
if(c < 10 ) :
s += chr(48 + c )
else :
s1 = ""
dup = c
while(dup > 0 ) :
s1 += chr(( dup % 10 ) + 48 )
dup //= 10
s1 = "". join(reversed(s1 ) )
s += s1
c += 1
if(len(s ) >= n ) :
return s[n - 1 ]
if __name__ == "__main__":
n = 11
print(NthCharacter(n ) )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: [convolutional.Conv2D.1] 4 3x3 filters on 5x5x2 input, strides=(1,1), padding='valid', data_format='channels_last', dilation_rate=(1,1), activation='linear', use_bias=False
Step2: [convolutional.Conv2D.2] 4 3x3 filters on 5x5x2 input, strides=(2,2), padding='valid', data_format='channels_last', dilation_rate=(1,1), activation='relu', use_bias=True
Step3: [convolutional.Conv2D.3] 5 4x4 filters on 7x7x3 input, strides=(2,1), padding='valid', data_format='channels_last', dilation_rate=(1,1), activation='relu', use_bias=True
Step4: [convolutional.Conv2D.4] 4 3x3 filters on 5x5x2 input, strides=(1,1), padding='same', data_format='channels_last', dilation_rate=(1,1), activation='relu', use_bias=True
Step5: [convolutional.Conv2D.5] 4 3x3 filters on 4x4x2 input, strides=(2,2), padding='same', data_format='channels_last', dilation_rate=(1,1), activation='relu', use_bias=True
Step6: [convolutional.Conv2D.6] 4 3x3 filters on 6x3x1 input, strides=(3,2), padding='same', data_format='channels_last', dilation_rate=(1,1), activation='relu', use_bias=True
Step7: [convolutional.Conv2D.7] 4 3x3 filters on 5x5x2 input, strides=(1,1), padding='valid', data_format='channels_last', dilation_rate=(2,2), activation='linear', use_bias=True
Step8: [convolutional.Conv2D.8] 4 3x3 filters on 5x5x2 input, strides=(1,1), padding='valid', data_format='channels_last', dilation_rate=(2,2), activation='linear', use_bias=False
Step9: [convolutional.Conv2D.9] 4 3x3 filters on 7x7x2 input, strides=(1,1), padding='valid', data_format='channels_last', dilation_rate=(3,3), activation='relu', use_bias=True
Step10: [convolutional.Conv2D.10] 3 4x4 filters on 4x8x3 input, strides=(1,1), padding='same', data_format='channels_last', dilation_rate=(2,2), activation='relu', use_bias=True
Step11: [convolutional.Conv2D.11] 4 3x3 filters on 8x8x2 input, strides=(1,1), padding='same', data_format='channels_last', dilation_rate=(4,4), activation='relu', use_bias=True
Step12: export for Keras.js tests
|
<ASSISTANT_TASK:>
Python Code:
data_in_shape = (5, 5, 2)
conv = Conv2D(4, (3,3), strides=(1,1), padding='valid',
data_format='channels_last', dilation_rate=(1,1),
activation='linear', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(100)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.0'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (5, 5, 2)
conv = Conv2D(4, (3,3), strides=(1,1), padding='valid',
data_format='channels_last', dilation_rate=(1,1),
activation='linear', use_bias=False)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(101)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.1'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (5, 5, 2)
conv = Conv2D(4, (3,3), strides=(2,2), padding='valid',
data_format='channels_last', dilation_rate=(1,1),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(102)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.2'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (7, 7, 3)
conv = Conv2D(5, (4,4), strides=(2,1), padding='valid',
data_format='channels_last', dilation_rate=(1,1),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(103)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.3'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (5, 5, 2)
conv = Conv2D(4, (3,3), strides=(1,1), padding='same',
data_format='channels_last', dilation_rate=(1,1),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(104)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.4'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 4, 2)
conv = Conv2D(4, (3,3), strides=(2,2), padding='same',
data_format='channels_last', dilation_rate=(1,1),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(105)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.5'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 3, 1)
conv = Conv2D(4, (3,3), strides=(3,2), padding='same',
data_format='channels_last', dilation_rate=(1,1),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(106)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.6'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (5, 5, 2)
conv = Conv2D(4, (3,3), strides=(1,1), padding='valid',
data_format='channels_last', dilation_rate=(2,2),
activation='linear', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(100)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.7'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (5, 5, 2)
conv = Conv2D(4, (3,3), strides=(1,1), padding='valid',
data_format='channels_last', dilation_rate=(2,2),
activation='linear', use_bias=False)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(101)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.8'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (7, 7, 2)
conv = Conv2D(4, (3,3), strides=(1,1), padding='valid',
data_format='channels_last', dilation_rate=(3,3),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(102)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.9'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 8, 3)
conv = Conv2D(3, (4,4), strides=(1,1), padding='same',
data_format='channels_last', dilation_rate=(2,2),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(103)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.10'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (8, 8, 2)
conv = Conv2D(4, (3,3), strides=(1,1), padding='same',
data_format='channels_last', dilation_rate=(4,4),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(104)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.11'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
print(json.dumps(DATA))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Toy example
Step2: At each point in time we plot the state estimate <i>after</i> accounting for the most recent measurement, which is why we are not at position 30 at time 0. The filter's attentiveness to the measurements allows it to correct for the initial bogus state we gave it. Then, by weighing its model and knowledge of the physical laws against new measurements, it is able to filter out much of the noise in the camera data. Meanwhile the confidence in the estimate increases with time, as shown by the graph below
Step3: The Kalman filter can also do <i>smoothing</i>, which takes in all of the input data at once and then constructs its best guess for the state of the system in each period post factum. That is, it does not provide online, running estimates, but instead uses all of the data to estimate the historical state, which is useful if we only want to use the data after we have collected all of it.
Step4: Example
Step5: This is a little hard to see, so we'll plot a subsection of the graph.
Step6: The advantage of the Kalman filter is that we don't need to select a window length, so we run less risk of overfitting. We do open ourselves up to overfitting with some of the initialization parameters for the filter, but those are slightly easier to objectively define. There's no free lunch and we can't eliminate overfitting, but a Kalman Filter is more rigorous than a moving average and generally better.
Step7: Let's figure out the inputs to our Kalman filter. We'll say that the state of our system is the line that the observations are following, with parameters $\alpha$ and $\beta$. Our initial guesses for these parameters are (0,0), with a covariance matrix (which describes the error of our guess) of all ones. As in the example of the rolling mean, we assume that our parameters follow a random walk (transition matrix is the identity) with a small error term (transition covariance is a small number times the identity).
Step8: Below we plot the means - that is, our best estimates - of $\alpha$ and $\beta$ over time.
Step9: Notice how much the parameters fluctuate over long periods of time. If we are basing a trading algorithm on this, such as something that involves beta hedging, it's important to have the best and most current estimate of the beta. To visualize how the system evolves through time, we plot every fifth state (linear model) below. For comparison, in black we have the line returned by using ordinary least-squares regression on the full dataset, which is very different.
Step10: Notice that although all of the state estimates take into account all previous observations, they fit the more recent data better than the older data. This allows the filter to adapt to structural changes in the data over time.
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
# Import a Kalman filter and other useful libraries
from pykalman import KalmanFilter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import poly1d
tau = 0.1
# Set up the filter
kf = KalmanFilter(n_dim_obs=1, n_dim_state=2, # position is 1-dimensional, (x,v) is 2-dimensional
initial_state_mean=[30,10],
initial_state_covariance=np.eye(2),
transition_matrices=[[1,tau], [0,1]],
observation_matrices=[[1,0]],
observation_covariance=3,
transition_covariance=np.zeros((2,2)),
transition_offsets=[-4.9*tau**2, -9.8*tau])
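# The model encoded above: the state is (height, velocity). Per step of length tau,
# x_{t+1} = x_t + tau*v_t - 4.9*tau^2 and v_{t+1} = v_t - 9.8*tau, i.e. s_{t+1} = F s_t + b
# with F = [[1, tau], [0, 1]] and the offset b coming from gravity. The camera observes
# only height: z_t = [1, 0] s_t + noise with variance 3.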
# Create a simulation of a ball falling for 40 units of time (each of length tau)
times = np.arange(40)
actual = -4.9*tau**2*times**2
# Simulate the noisy camera data
sim = actual + 3*np.random.randn(40)
# Run filter on camera data
state_means, state_covs = kf.filter(sim)
plt.plot(times, state_means[:,0])
plt.plot(times, sim)
plt.plot(times, actual)
plt.legend(['Filter estimate', 'Camera data', 'Actual'])
plt.xlabel('Time')
plt.ylabel('Height');
print times
print state_means[:,0]
# Plot variances of x and v, extracting the appropriate values from the covariance matrix
plt.plot(times, state_covs[:,0,0])
plt.plot(times, state_covs[:,1,1])
plt.legend(['Var(x)', 'Var(v)'])
plt.ylabel('Variance')
plt.xlabel('Time');
# Use smoothing to estimate what the state of the system has been
smoothed_state_means, _ = kf.smooth(sim)
# Plot results
plt.plot(times, smoothed_state_means[:,0])
plt.plot(times, sim)
plt.plot(times, actual)
plt.legend(['Smoothed estimate', 'Camera data', 'Actual'])
plt.xlabel('Time')
plt.ylabel('Height');
# Load pricing data for a security
start = '2013-01-01'
end = '2015-01-01'
x = get_pricing('LMT', fields='price', start_date=start, end_date=end)
# Construct a Kalman filter
kf = KalmanFilter(transition_matrices = [1],
observation_matrices = [1],
initial_state_mean = 0,
initial_state_covariance = 1,
observation_covariance=1,
transition_covariance=.01)
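# This is the local-level (random walk + noise) model: state_t = state_{t-1} + w_t
# (variance 0.01) and price_t = state_t + v_t (variance 1), so the filtered state
# behaves like an adaptive moving average with no fixed window length.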
# Use the observed values of the price to get a rolling mean
state_means, _ = kf.filter(x.values)
# Compute the rolling mean with various lookback windows
mean30 = pd.rolling_mean(x, 30)
mean60 = pd.rolling_mean(x, 60)
mean90 = pd.rolling_mean(x, 90)
# Plot original data and estimated mean
plt.plot(state_means)
plt.plot(x)
plt.plot(mean30)
plt.plot(mean60)
plt.plot(mean90)
plt.title('Kalman filter estimate of average')
plt.legend(['Kalman Estimate', 'X', '30-day Moving Average', '60-day Moving Average','90-day Moving Average'])
plt.xlabel('Day')
plt.ylabel('Price');
plt.plot(state_means[-200:])
plt.plot(x[-200:])
plt.plot(mean30[-200:])
plt.plot(mean60[-200:])
plt.plot(mean90[-200:])
plt.title('Kalman filter estimate of average')
plt.legend(['Kalman Estimate', 'X', '30-day Moving Average', '60-day Moving Average','90-day Moving Average'])
plt.xlabel('Day')
plt.ylabel('Price');
# Load pricing data
start = '2012-01-01'
end = '2015-01-01'
y = get_pricing('AMZN', fields='price', start_date=start, end_date=end)
x = get_pricing('SPY', fields='price', start_date=start, end_date=end)
# Plot data and use colormap to indicate the date each point corresponds to
cm = plt.get_cmap('jet')
colors = np.linspace(0.1, 1, len(x))
sc = plt.scatter(x, y, s=30, c=colors, cmap=cm, edgecolor='k', alpha=0.7)
cb = plt.colorbar(sc)
cb.ax.set_yticklabels([str(p.date()) for p in x[::len(x)//9].index])
plt.xlabel('SPY')
plt.ylabel('AMZN');
delta = 1e-3
trans_cov = delta / (1 - delta) * np.eye(2) # How much random walk wiggles
obs_mat = np.expand_dims(np.vstack([[x], [np.ones(len(x))]]).T, axis=1)
kf = KalmanFilter(n_dim_obs=1, n_dim_state=2, # y is 1-dimensional, (alpha, beta) is 2-dimensional
initial_state_mean=[0,0],
initial_state_covariance=np.ones((2, 2)),
transition_matrices=np.eye(2),
observation_matrices=obs_mat,
observation_covariance=2,
transition_covariance=trans_cov)
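# Here the hidden state is the regression pair (slope, intercept): it follows a random
# walk (identity transition, small trans_cov), and each observation is
# y_t = slope_t * x_t + intercept_t + noise, encoded by the obs_mat rows [x_t, 1].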
# Use the observations y to get running estimates and errors for the state parameters
state_means, state_covs = kf.filter(y.values)
_, axarr = plt.subplots(2, sharex=True)
axarr[0].plot(x.index, state_means[:,0], label='slope')
axarr[0].legend()
axarr[1].plot(x.index, state_means[:,1], label='intercept')
axarr[1].legend()
plt.tight_layout();
# Plot data points using colormap
sc = plt.scatter(x, y, s=30, c=colors, cmap=cm, edgecolor='k', alpha=0.7)
cb = plt.colorbar(sc)
cb.ax.set_yticklabels([str(p.date()) for p in x[::len(x)//9].index])
# Plot every fifth line
step = 5
xi = np.linspace(x.min()-5, x.max()+5, 2)
colors_l = np.linspace(0.1, 1, len(state_means[::step]))
for i, beta in enumerate(state_means[::step]):
plt.plot(xi, beta[0] * xi + beta[1], alpha=.2, lw=1, c=cm(colors_l[i]))
# Plot the OLS regression line
plt.plot(xi, poly1d(np.polyfit(x, y, 1))(xi), '0.4')
# Adjust axes for visibility
plt.axis([125, 210, 150, 410])
# Label axes
plt.xlabel('SPY')
plt.ylabel('AMZN');
# Get returns from pricing data
x_r = x.pct_change()[1:]
y_r = y.pct_change()[1:]
# Run Kalman filter on returns data
delta_r = 1e-2
trans_cov_r = delta_r / (1 - delta_r) * np.eye(2) # How much random walk wiggles
obs_mat_r = np.expand_dims(np.vstack([[x_r], [np.ones(len(x_r))]]).T, axis=1)
kf_r = KalmanFilter(n_dim_obs=1, n_dim_state=2, # y_r is 1-dimensional, (alpha, beta) is 2-dimensional
initial_state_mean=[0,0],
initial_state_covariance=np.ones((2, 2)),
transition_matrices=np.eye(2),
observation_matrices=obs_mat_r,
observation_covariance=.01,
transition_covariance=trans_cov_r)
state_means_r, _ = kf_r.filter(y_r.values)
# Plot data points using colormap
colors_r = np.linspace(0.1, 1, len(x_r))
sc = plt.scatter(x_r, y_r, s=30, c=colors_r, cmap=cm, edgecolor='k', alpha=0.7)
cb = plt.colorbar(sc)
cb.ax.set_yticklabels([str(p.date()) for p in x_r[::len(x_r)//9].index])
# Plot every fifth line
step = 5
xi = np.linspace(x_r.min()-4, x_r.max()+4, 2)
colors_l = np.linspace(0.1, 1, len(state_means_r[::step]))
for i, beta in enumerate(state_means_r[::step]):
plt.plot(xi, beta[0] * xi + beta[1], alpha=.2, lw=1, c=cm(colors_l[i]))
# Plot the OLS regression line
plt.plot(xi, poly1d(np.polyfit(x_r, y_r, 1))(xi), '0.4')
# Adjust axes for visibility
plt.axis([-0.03,0.03,-0.11, 0.11])
# Label axes
plt.xlabel('SPY returns')
plt.ylabel('AMZN returns');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The behaviour of the * operator may be surprising: it does not compute the matrix product. The dot function is responsible for that.
Step2: Other addition and multiplication operations
Step3: Stacking arrays
Step4: Typical mathematical functions
Step5: Other functions
Step6: Multidimensional arrays have one index per dimension
Step7: Conditions
Step8: Loops and printing
Step9: Random numbers
|
<ASSISTANT_TASK:>
Python Code:
a = np.array([3,4,5])
b = np.ones(3)
a - b
a = np.array([[1,2],[3,4]])
b = np.array([[1,2],[3,4]])
a
b
a * b
np.dot(a,b)
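# Note: on Python 3.5+ with a recent NumPy, `a @ b` computes the same matrix product
# as np.dot(a, b); np.dot is kept here since the rest of this notebook uses Python 2 syntax.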
a = np.zeros((2,2),dtype='float')
a += 5
a
a *= 5
a
a + a
a = np.array([1,2,3])
b = np.array([4,5,6])
c = np.array([7,8,9])
np.hstack([a,b,c])
np.vstack([a,b,c])
x = np.arange(1,5)
np.sqrt(x) * np.pi
2**4
np.power(2,4)
np.log(np.e)
x = np.arange(5)
x.max() - x.min()
a = np.arange(10)
a[2:4]
a[:10:2]
a[::-1]
x = np.arange(12).reshape(3,4)
x
x[2,3]
x[:,1]
x[1,:]
x[1:3,:]
a = np.array([1,1,1,2,2,2,3,3,3])
a[a>1]
a[a==3]
np.where(a<3)
np.where(a<3)[0]
np.where(a>9)
for row in x:
print row
for element in x.flat:
print element
np.random.randint(0,10,5)
np.random.normal(0,1,5)
np.random.uniform(0,2,5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will try summarizing a small toy example; later we will use a larger piece of text. In reality, the text is too small, but it suffices as an illustrative example.
Step2: To summarize this text, we pass the <b>raw string data</b> as input to the function "summarize", and it will return a summary.
Step3: Use the "split" option if you want a list of strings instead of a single string.
Step4: You can adjust how much text the summarizer outputs via the "ratio" parameter or the "word_count" parameter. Using the "ratio" parameter, you specify what fraction of sentences in the original text should be returned as output. Below we specify that we want 50% of the original text (the default is 20%).
Step5: Using the "word_count" parameter, we specify the maximum amount of words we want in the summary. Below we have specified that we want no more than 50 words.
Step6: As mentioned earlier, this module also supports <b>keyword</b> extraction. Keyword extraction works in the same way as summary generation (i.e. sentence extraction), in that the algorithm tries to find words that are important or seem representative of the entire text. The keywords are not always single words; in the case of multi-word keywords, they are typically all nouns.
Step7: <h2>Larger example</h2>
Step8: If you know this movie, you see that this summary is actually quite good. We also see that some of the most important characters (Neo, Morpheus, Trinity) were extracted as keywords.
|
<ASSISTANT_TASK:>
Python Code:
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
from gensim.summarization import summarize
text = "Thomas A. Anderson is a man living two lives. By day he is an " + \
"average computer programmer and by night a hacker known as " + \
"Neo. Neo has always questioned his reality, but the truth is " + \
"far beyond his imagination. Neo finds himself targeted by the " + \
"police when he is contacted by Morpheus, a legendary computer " + \
"hacker branded a terrorist by the government. Morpheus awakens " + \
"Neo to the real world, a ravaged wasteland where most of " + \
"humanity have been captured by a race of machines that live " + \
"off of the humans' body heat and electrochemical energy and " + \
"who imprison their minds within an artificial reality known as " + \
"the Matrix. As a rebel against the machines, Neo must return to " + \
"the Matrix and confront the agents: super-powerful computer " + \
"programs devoted to snuffing out Neo and the entire human " + \
"rebellion. "
print ('Input text:')
print (text)
print ('Summary:')
print (summarize(text))
print (summarize(text, split=True))
print ('Summary:')
print (summarize(text, ratio=0.5))
print ('Summary:')
print (summarize(text, word_count=50))
from gensim.summarization import keywords
print ('Keywords:')
print (keywords(text))
import requests
text = requests.get('http://rare-technologies.com/the_matrix_synopsis.txt').text
print ('Summary:')
print (summarize(text, ratio=0.01))
print ('\nKeywords:')
print (keywords(text, ratio=0.01))
import requests
text = requests.get('http://rare-technologies.com/the_big_lebowski_synopsis.txt').text
print ('Summary:')
print (summarize(text, ratio=0.01))
print ('\nKeywords:')
print (keywords(text, ratio=0.01))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This file has 23 features and 10,341 data points. Clearly not all of these features are useful for training a model. For example we have date and location. By applying principal component analysis (https
Step2: Next we have to standardise the data. Standardisation is an integral part of preprocessing for an SVM. It ensures all features exist on the same scale.
Step3: Implementing an SVM from scratch would be a tedious and tricky process. Luckily Scikit-Learn has already done so by creating a python wrapper for the C++ library LibSVM. LibSVM is a very efficient library for running SVM related tasks.
Step4: Of the input parameters above, the most important are C, class_weight, gamma and kernel. The purpose of C is to decide the trade-off between fitting the model to the training set and maintaining a smooth hyperplane. class_weight simply denotes the structure of the training data provided relative to its classes. Our data is balanced, hence we have used that here. gamma corresponds to how much influence a single training point has over the fitting of the hyperplane. In this example we have let sklearn select gamma automatically. Finally, the kernel is the function that is responsible for finding the mathematical relationship between the independent feature vectors and corresponding classes. In our case we have selected 'rbf', or radial basis function. This kernel allows fitting of a non-linear hyperplane to the data. This is useful as the relationship between our features (i.e. avgtemp, avghumid etc) and our classes (Fire, No fire) may not necessarily be linear.
Step5: In addition, an accuracy score can be calculated similarly
Step6: This accuracy can be tweaked by changing hyper-parameters used to train the model as well as altering the data that is trained upon by means of changing the seed when splitting the data. Our final model obtains an accuracy of x%.
Step7: Making predictions
Step8: We then opted to append the predictions array above to a pandas DataFrame and save that DataFrame as a new CSV, 'svmoutput.csv'
Step9: As you can imagine the generated CSV has the same format as the input CSV with the only exception being the appended prediction column added to the end.
Step10: Our code in practice
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.io.parsers.read_csv(
'Data/NewBalanced.csv',
)
print(df.shape)
print('\n')
print(df.head(5))
print('\n')
print(df.tail(1))
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
df = pd.io.parsers.read_csv(
'Data/NewBalanced.csv',
header=None,
skiprows = [0],
usecols=[5,10,15,17,18,19,20,22]
)
X = df.values[:,:7]
y = df.values[:,7]
#split the data into training and testing data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=12345)
from sklearn import preprocessing
std_scale = preprocessing.StandardScaler().fit(X_train) #allows data to be standardised under the same scale
X_train_std = std_scale.transform(X_train)
X_test_std = std_scale.transform(X_test)
from sklearn.svm import SVC
clf = SVC(C=1.0, cache_size=200, class_weight='balanced', coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=True, random_state=None, shrinking=True,
tol=0.001, verbose=False)
clf.fit(X_train,y_train)
clf.predict(X_test)
print('Accuracy is: {}%'.format(clf.score(X_test, y_test, sample_weight=None)*100))
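# Hedged example (added, not part of the original notebook): the description mentions
# tweaking hyper-parameters; a small cross-validated grid search is one standard way.
# The grid values below are illustrative assumptions, not the authors' choices.
from sklearn.model_selection import GridSearchCV
param_grid = {'C': [0.1, 1, 10], 'gamma': ['auto', 0.01, 0.1]}
grid = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid, cv=5)
grid.fit(X_train, y_train)
print('Best params: {}, CV accuracy: {:.3f}'.format(grid.best_params_, grid.best_score_))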
clf.predict_proba(X_test)
foredf = pd.io.parsers.read_csv(
'Data/svminput.csv',
header=None,
skiprows = [0],
usecols=[1,2,3,4,5,6,8,9,10,11]
)
X_forecast = foredf.values[:,3:]
X_forecast_std = std_scale.transform(X_forecast)
fore_pred = clf.predict(X_forecast_std)
forearray = foredf.values.tolist()
i = 0
for element in forearray:
element.append(fore_pred[i])
#element.append(fore_prob[i][1])
i +=1
df = pd.DataFrame(forearray)
df.to_csv('Data/svmoutput.csv')
df = pd.io.parsers.read_csv(
'Data/svmoutput.csv',
)
print(df.shape)
print('\n')
print(df.head(10))
'''
Author: Flinn Dolman
@License: MIT
An example script that leverages our code to train a model and make predictions based upon it. Predictions
are printed to stdout and then the model used to make the predictions is saved.
'''
from SVM import SVM
from Standardiser import Standardiser
def Main():
forecast_loc = 'Data/svminput.csv'
standard_data = Standardiser()
standard_data.initialise()
clf = SVM()
clf.initialise(standard_data.get_std_X_train(),standard_data.get_std_X_test(),standard_data.get_y_train(),standard_data.get_y_test())
print('\nThese are the predictions: {}\n'.format(clf.predictions()))
predictions, probs = clf.predictions()
y_test = standard_data.get_y_test()
for i in range(0,len(predictions)-1):
print('Prediction: {}, with probability: {}, correct value: {}'.format(predictions[i],probs[i], y_test[i]))
print('Accuracy is: {}%'.format(clf.accuracy()*100))
fore_Pred, fore_Prob = clf.forecast_Pred(standard_data.loadForecast(forecast_loc))
standard_data.make_CSV(fore_Pred,fore_Prob,'Data/svmoutputnew.csv')
clf.saveModel()
if __name__ =="__main__":
Main()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TransientGenerator
Step2: SimulSurvey
Step3: Analysing the output
Step4: You can inspect the lightcurves manually. This example should return the lightcurve with the most points with S/N > 5 (a hedged selection sketch is given after this step list).
Step5: The two figures below show how early the MNe are detected and at what redshifts. The simulation input parameters of transients that were not detected are also kept, so you can check completeness.
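A minimal selection sketch, assuming each entry of lcs is a table with 'flux' and 'fluxerr' columns (as built in the code below), could look like this:
# pick the lightcurve with the largest number of points above S/N = 5
idx_best = max(range(len(lcs.lcs)), key=lambda i: ((lcs[i]['flux'] / lcs[i]['fluxerr']) > 5).sum())
_ = sncosmo.plot_lc(lcs[idx_best])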
|
<ASSISTANT_TASK:>
Python Code:
import os
home_dir = os.environ.get('HOME')
# Please enter the filename of the ztf_sim output file you would like to use. The example first determines
# your home directory and then uses a relative path (useful if working on several machines with different usernames)
survey_file = os.path.join(home_dir, 'data/ZTF/test_schedule_v6.db')
# Please enter the path to where you have placed the Schlegel, Finkbeiner & Davis (1998) dust map files
# You can also set the environment variable SFD_DIR to this path (in that case the variable below should be None)
sfd98_dir = os.path.join(home_dir, 'data/sfd98')
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import simsurvey
import sncosmo
from astropy.cosmology import Planck15
import simsurvey_tools as sst
# Load the ZTF CCD corners and filters
ccds = sst.load_ztf_ccds()
sst.load_ztf_filters()
# Load simulated survey from file (download from ftp://ftp.astro.caltech.edu/users/ebellm/one_year_sim_incomplete.db)
# Currently DES filters are used as proxies for ZTF filters
plan = simsurvey.SurveyPlan(load_opsim=survey_file, band_dict={'g': 'ztfg', 'r': 'ztfr', 'i': 'desi'}, ccds=ccds)
mjd_range = (plan.cadence['time'].min() - 30, plan.cadence['time'].max() + 30)
# To review the pointing schedule, you can use this table
plan.pointings
tr = simsurvey.get_transient_generator((0.0, 0.05),
transient='Ia',
template='salt2',
dec_range=(-30,90),
mjd_range=(mjd_range[0],
mjd_range[1]),
sfd98_dir=sfd98_dir)
survey = simsurvey.SimulSurvey(generator=tr, plan=plan)
lcs = survey.get_lightcurves(
#progress_bar=True, notebook=True # If you get an error because of the progress_bar, delete this line.
)
len(lcs.lcs)
lcs[0]
lcs.save('lcs_tutorial.pkl')
lcs = simsurvey.LightcurveCollection(load='lcs_tutorial.pkl')
_ = sncosmo.plot_lc(lcs[0])
plt.hist(lcs.stats['p_det'], lw=2, histtype='step', range=(-20,0), bins=20)
plt.xlabel('Detection phase (observer-frame)', fontsize='x-large')
_ = plt.ylabel(r'$N_{SNe}$', fontsize='x-large')
plt.hist(lcs.meta_full['z'], lw=1, histtype='step', range=(0,0.05), bins=20, label='all')
plt.hist(lcs.meta['z'], lw=2, histtype='step', range=(0,0.05), bins=20, label='detected')
plt.xlabel('Redshift', fontsize='x-large')
plt.ylabel(r'$N_{SNe}$', fontsize='x-large')
plt.xlim((0, 0.05))
plt.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2> Import the dataframe and remove any features that are all zero </h2>
Step2: <h2> Get mappings between sample names, file names, and sample classes </h2>
Step3: <h2> Plot the distribution of classification accuracy across multiple cross-validation splits - Kinda Dumb</h2>
Step4: <h2> pqn normalize your features </h2>
Step5: <h2>Random Forest & adaBoost with PQN-normalized data</h2>
Step6: <h2> RF & adaBoost with PQN-normalized, log-transformed data </h2>
Step7: <h2> Great, you can classify things. But make null models and do a sanity check to make sure the classification signal is better than chance </h2>
Step8: <h2> Let's check out some PCA plots </h2>
Step9: <h2> What about with all three classes? </h2>
|
<ASSISTANT_TASK:>
Python Code:
import time
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.cross_validation import cross_val_score
#from sklearn.model_selection import StratifiedShuffleSplit
#from sklearn.model_selection import cross_val_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import roc_curve, auc
from sklearn.utils import shuffle
from scipy import interp
%matplotlib inline
def remove_zero_columns(X, threshold=1e-20):
# convert zeros to nan, drop all nan columns, the replace leftover nan with zeros
X_non_zero_colum = X.replace(0, np.nan).dropna(how='all', axis=1).replace(np.nan, 0)
#.dropna(how='all', axis=0).replace(np.nan,0)
return X_non_zero_colum
def zero_fill_half_min(X, threshold=1e-20):
# Fill zeros with 1/2 the minimum value of that column
# input dataframe. Add only to zero values
# Get a vector of 1/2 minimum values
half_min = X[X > threshold].min(axis=0)*0.5
# Add the half_min values to a dataframe where everything that isn't zero is NaN.
# then convert NaN's to 0
fill_vals = (X[X < threshold] + half_min).fillna(value=0)
# Add the original dataframe to the dataframe of zeros and fill-values
X_zeros_filled = X + fill_vals
return X_zeros_filled
toy = pd.DataFrame([[1,2,3,0],
[0,0,0,0],
[0.5,1,0,0]], dtype=float)
toy_no_zeros = remove_zero_columns(toy)
toy_filled_zeros = zero_fill_half_min(toy_no_zeros)
print toy
print toy_no_zeros
print toy_filled_zeros
### Subdivide the data into a feature table
data_path = '/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revo_healthcare/data/processed/MTBLS315/'\
'uhplc_pos/xcms_result_25.csv'
## Import the data and remove extraneous columns
df = pd.read_csv(data_path, index_col=0)
df.shape
df.head()
# Make a new index of mz:rt
mz = df.loc[:,"mz"].astype('str')
rt = df.loc[:,"rt"].astype('str')
idx = mz+':'+rt
df.index = idx
df
# separate samples from xcms/camera things to make feature table
not_samples = ['mz', 'mzmin', 'mzmax', 'rt', 'rtmin', 'rtmax',
'npeaks', 'uhplc_pos',
]
samples_list = df.columns.difference(not_samples)
mz_rt_df = df[not_samples]
# convert to samples x features
X_df_raw = df[samples_list].T
# Remove zero-full columns and fill zeroes with 1/2 minimum values
X_df = remove_zero_columns(X_df_raw)
X_df_zero_filled = zero_fill_half_min(X_df)
print "original shape: %s \n# zeros: %f\n" % (X_df_raw.shape, (X_df_raw < 1e-20).sum().sum())
print "zero-columns repalced? shape: %s \n# zeros: %f\n" % (X_df.shape,
(X_df < 1e-20).sum().sum())
print "zeros filled shape: %s \n#zeros: %f\n" % (X_df_zero_filled.shape,
(X_df_zero_filled < 1e-20).sum().sum())
# Convert to numpy matrix to play nicely with sklearn
X = X_df.as_matrix()
print X.shape
# Get mapping between sample name and assay names
path_sample_name_map = '/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revo_healthcare/data/raw/'\
'MTBLS315/metadata/a_UPLC_POS_nmfi_and_bsi_diagnosis.txt'
# Index is the sample name
sample_df = pd.read_csv(path_sample_name_map,
sep='\t', index_col=0)
sample_df = sample_df['MS Assay Name']
sample_df.shape
print sample_df.head(10)
# get mapping between sample name and sample class
path_sample_class_map = '/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revo_healthcare/data/raw/'\
'MTBLS315/metadata/s_NMFI and BSI diagnosis.txt'
class_df = pd.read_csv(path_sample_class_map,
sep='\t')
# Set index as sample name
class_df.set_index('Sample Name', inplace=True)
class_df = class_df['Factor Value[patient group]']
print class_df.head(10)
# convert all non-malarial classes into a single classes
# (collapse non-malarial febril illness and bacteremia together)
class_map_df = pd.concat([sample_df, class_df], axis=1)
class_map_df.rename(columns={'Factor Value[patient group]': 'class'}, inplace=True)
class_map_df
binary_class_map = class_map_df.replace(to_replace=['non-malarial febrile illness', 'bacterial bloodstream infection' ],
value='non-malarial fever')
binary_class_map
# convert classes to numbers
le = preprocessing.LabelEncoder()
le.fit(binary_class_map['class'])
y = le.transform(binary_class_map['class'])
def rf_violinplot(X, y, n_iter=25, test_size=0.3, random_state=1,
n_estimators=1000):
cross_val_skf = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size,
random_state=random_state)
clf = RandomForestClassifier(n_estimators=n_estimators, random_state=random_state)
scores = cross_val_score(clf, X, y, cv=cross_val_skf)
sns.violinplot(scores,inner='stick')
rf_violinplot(X,y)
# TODO - Switch to using caret for this bs..?
# Do multi-fold cross validation for adaboost classifier
def adaboost_violinplot(X, y, n_iter=25, test_size=0.3, random_state=1,
n_estimators=200):
cross_val_skf = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size, random_state=random_state)
clf = AdaBoostClassifier(n_estimators=n_estimators, random_state=random_state)
scores = cross_val_score(clf, X, y, cv=cross_val_skf)
sns.violinplot(scores,inner='stick')
adaboost_violinplot(X,y)
# TODO PQN normalization, and log-transformation,
# and some feature selection (above certain threshold of intensity, use principal components), et
def pqn_normalize(X, integral_first=False, plot=False):
'''
Take a feature table and run PQN normalization on it
'''
# normalize by sum of intensities in each sample first. Not necessary
if integral_first:
sample_sums = np.sum(X, axis=1)
X = (X / sample_sums[:,np.newaxis])
# Get the median value of each feature across all samples
mean_intensities = np.median(X, axis=0)
# Divde each feature by the median value of each feature -
# these are the quotients for each feature
X_quotients = (X / mean_intensities[np.newaxis,:])
if plot: # plot the distribution of quotients from one sample
for i in range(1,len(X_quotients[:,1])):
print 'allquotients reshaped!\n\n',
#all_quotients = X_quotients.reshape(np.prod(X_quotients.shape))
all_quotients = X_quotients[i,:]
print all_quotients.shape
x = np.random.normal(loc=0, scale=1, size=len(all_quotients))
sns.violinplot(all_quotients)
plt.title("median val: %f\nMax val=%f" % (np.median(all_quotients), np.max(all_quotients)))
plt.plot( title="median val: ")#%f" % np.median(all_quotients))
plt.xlim([-0.5, 5])
plt.show()
# Define a quotient for each sample as the median of the feature-specific quotients
# in that sample
sample_quotients = np.median(X_quotients, axis=1)
# Quotient normalize each samples
X_pqn = X / sample_quotients[:,np.newaxis]
return X_pqn
# Make a fake sample, with 2 samples at 1x and 2x dilutions
X_toy = np.array([[1,1,1,],
[2,2,2],
[3,6,9],
[6,12,18]], dtype=float)
print X_toy
print X_toy.reshape(1, np.prod(X_toy.shape))
X_toy_pqn_int = pqn_normalize(X_toy, integral_first=True, plot=True)
print X_toy_pqn_int
print '\n\n\n'
X_toy_pqn = pqn_normalize(X_toy)
print X_toy_pqn
X_pqn = pqn_normalize(X)
print X_pqn
rf_violinplot(X_pqn, y)
# Do multi-fold cross validation for adaboost classifier
adaboost_violinplot(X_pqn, y)
X_pqn_nlog = np.log(X_pqn)
rf_violinplot(X_pqn_nlog, y)
adaboost_violinplot(X_pqn_nlog, y)
def roc_curve_cv(X, y, clf, cross_val,
path='/home/irockafe/Desktop/roc.pdf',
save=False, plot=True):
t1 = time.time()
# collect vals for the ROC curves
tpr_list = []
mean_fpr = np.linspace(0,1,100)
auc_list = []
# Get the false-positive and true-positive rate
for i, (train, test) in enumerate(cross_val):
clf.fit(X[train], y[train])
y_pred = clf.predict_proba(X[test])[:,1]
# get fpr, tpr
fpr, tpr, thresholds = roc_curve(y[test], y_pred)
roc_auc = auc(fpr, tpr)
#print 'AUC', roc_auc
#sns.plt.plot(fpr, tpr, lw=10, alpha=0.6, label='ROC - AUC = %0.2f' % roc_auc,)
#sns.plt.show()
tpr_list.append(interp(mean_fpr, fpr, tpr))
tpr_list[-1][0] = 0.0
auc_list.append(roc_auc)
if (i % 10 == 0):
print '{perc}% done! {time}s elapsed'.format(perc=100*float(i)/cross_val.n_iter, time=(time.time() - t1))
# get mean tpr and fpr
mean_tpr = np.mean(tpr_list, axis=0)
# make sure it ends up at 1.0
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(auc_list)
if plot:
# plot mean auc
plt.plot(mean_fpr, mean_tpr, label='Mean ROC - AUC = %0.2f $\pm$ %0.2f' % (mean_auc,
std_auc),
lw=5, color='b')
# plot luck-line
plt.plot([0,1], [0,1], linestyle = '--', lw=2, color='r',
label='Luck', alpha=0.5)
# plot 1-std
std_tpr = np.std(tpr_list, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.2,
label=r'$\pm$ 1 stdev')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve, {iters} iterations of {cv} cross validation'.format(
iters=cross_val.n_iter, cv='{train}:{test}'.format(test=cross_val.test_size, train=(1-cross_val.test_size)))
)
plt.legend(loc="lower right")
if save:
plt.savefig(path, format='pdf')
plt.show()
return tpr_list, auc_list, mean_fpr
rf_estimators = 1000
n_iter = 3
test_size = 0.3
random_state = 1
cross_val_rf = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size, random_state=random_state)
clf_rf = RandomForestClassifier(n_estimators=rf_estimators, random_state=random_state)
rf_graph_path = '''/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revolutionizing_healthcare/data/MTBLS315/\
isaac_feature_tables/uhplc_pos/rf_roc_{trees}trees_{cv}cviter.pdf'''.format(trees=rf_estimators, cv=n_iter)
print cross_val_rf.n_iter
print cross_val_rf.test_size
tpr_vals, auc_vals, mean_fpr = roc_curve_cv(X_pqn, y, clf_rf, cross_val_rf,
path=rf_graph_path, save=False)
# For adaboosted
n_iter = 3
test_size = 0.3
random_state = 1
adaboost_estimators = 200
adaboost_path = '''/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revolutionizing_healthcare/data/MTBLS315/\
isaac_feature_tables/uhplc_pos/adaboost_roc_{trees}trees_{cv}cviter.pdf'''.format(trees=adaboost_estimators,
cv=n_iter)
cross_val_adaboost = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size, random_state=random_state)
clf = AdaBoostClassifier(n_estimators=adaboost_estimators, random_state=random_state)
adaboost_tpr, adaboost_auc, adaboost_fpr = roc_curve_cv(X_pqn, y, clf, cross_val_adaboost,
path=adaboost_path)
# Make a null model AUC curve
def make_null_model(X, y, clf, cross_val, random_state=1, num_shuffles=5, plot=True):
'''
Runs the true model, then sanity-checks by:
Shuffles class labels and then builds cross-validated ROC curves from them.
Compares true AUC vs. shuffled auc by t-test (assumes normality of AUC curve)
'''
null_aucs = []
print y.shape
print X.shape
tpr_true, auc_true, fpr_true = roc_curve_cv(X, y, clf, cross_val)
# shuffle y lots of times
for i in range(0, num_shuffles):
#Iterate through the shuffled y vals and repeat with appropriate params
# Retain the auc vals for final plotting of distribution
y_shuffle = shuffle(y)
cross_val.y = y_shuffle
cross_val.y_indices = y_shuffle
print 'Number of differences b/t original and shuffle: %s' % (y == cross_val.y).sum()
# Get auc values for number of iterations
tpr, auc, fpr = roc_curve_cv(X, y_shuffle, clf, cross_val, plot=False)
null_aucs.append(auc)
#plot the outcome
if plot:
flattened_aucs = [j for i in null_aucs for j in i]
my_dict = {'true_auc': auc_true, 'null_auc': flattened_aucs}
df_poop = pd.DataFrame.from_dict(my_dict, orient='index').T
df_tidy = pd.melt(df_poop, value_vars=['true_auc', 'null_auc'],
value_name='auc', var_name='AUC_type')
#print flattened_aucs
sns.violinplot(x='AUC_type', y='auc',
inner='points', data=df_tidy)
# Plot distribution of AUC vals
plt.title("Distribution of aucs")
#sns.plt.ylabel('count')
plt.xlabel('AUC')
#sns.plt.plot(auc_true, 0, color='red', markersize=10)
plt.show()
# Do a quick t-test to see if odds of randomly getting an AUC that good
return auc_true, null_aucs
# Make a null model AUC curve & compare it to null-model
# Random forest magic!
rf_estimators = 1000
n_iter = 50
test_size = 0.3
random_state = 1
cross_val_rf = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size, random_state=random_state)
clf_rf = RandomForestClassifier(n_estimators=rf_estimators, random_state=random_state)
true_auc, all_aucs = make_null_model(X_pqn, y, clf_rf, cross_val_rf, num_shuffles=5)
# make dataframe from true and false aucs
flattened_aucs = [j for i in all_aucs for j in i]
my_dict = {'true_auc': true_auc, 'null_auc': flattened_aucs}
df_poop = pd.DataFrame.from_dict(my_dict, orient='index').T
df_tidy = pd.melt(df_poop, value_vars=['true_auc', 'null_auc'],
value_name='auc', var_name='AUC_type')
print df_tidy.head()
#print flattened_aucs
sns.violinplot(x='AUC_type', y='auc',
inner='points', data=df_tidy, bw=0.7)
plt.show()
from sklearn.decomposition import PCA
# Check PCA of things
def PCA_plot(X, y, n_components, plot_color, class_nums, class_names, title='PCA'):
pca = PCA(n_components=n_components)
X_pca = pca.fit(X).transform(X)
print zip(plot_color, class_nums, class_names)
for color, i, target_name in zip(plot_color, class_nums, class_names):
# plot one class at a time, first plot all classes y == 0
#print color
#print y == i
xvals = X_pca[y == i, 0]
print xvals.shape
yvals = X_pca[y == i, 1]
plt.scatter(xvals, yvals, color=color, alpha=0.8, label=target_name)
plt.legend(bbox_to_anchor=(1.01,1), loc='upper left', shadow=False)#, scatterpoints=1)
plt.title('PCA of Malaria data')
plt.show()
PCA_plot(X_pqn, y, 2, ['red', 'blue'], [0,1], ['malaria', 'non-malaria fever'])
PCA_plot(X, y, 2, ['red', 'blue'], [0,1], ['malaria', 'non-malaria fever'])
# convert classes to numbers
le = preprocessing.LabelEncoder()
le.fit(class_map_df['class'])
y_three_class = le.transform(class_map_df['class'])
print class_map_df.head(10)
print y_three_class
print X.shape
print y_three_class.shape
y_labels = np.sort(class_map_df['class'].unique())
print y_labels
colors = ['green', 'red', 'blue']
print np.unique(y_three_class)
PCA_plot(X_pqn, y_three_class, 2, colors, np.unique(y_three_class), y_labels)
PCA_plot(X, y_three_class, 2, colors, np.unique(y_three_class), y_labels)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Feature Engineering
Step2: Building the model and parameter tuning
|
<ASSISTANT_TASK:>
Python Code:
import warnings
warnings.filterwarnings("ignore")
%matplotlib inline
import sys
sys.path.append("..")
#Import standard pydata libs
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
filename = '../facies_vectors.csv'
training_data = pd.read_csv(filename)
training_data['Well Name'] = training_data['Well Name'].astype('category')
training_data['Formation'] = training_data['Formation'].astype('category')
training_data.describe()
#Visualize the distribution of facies for each well
wells = training_data['Well Name'].unique()
fig, ax = plt.subplots(5,2, figsize=(20,20))
for i, well in enumerate(wells):
row = i % ax.shape[0]
column = i // ax.shape[0]
counts = training_data[training_data['Well Name']==well].Facies.value_counts()
data_for_well = [counts[j] if j in counts.index else 0 for j in range(1,10)]
ax[row, column].bar(range(1,10), data_for_well, align='center')
ax[row, column].set_title("{well}".format(well=well))
ax[row, column].set_ylabel("Counts")
ax[row, column].set_xticks(range(1,10))
plt.show()
plt.figure(figsize=(10,10))
sns.heatmap(training_data.drop(['Formation', 'Well Name'], axis=1).corr())
dfs = []
for well in training_data['Well Name'].unique():
df = training_data[training_data['Well Name']==well].copy(deep=True)
df.sort_values('Depth', inplace=True)
for col in ['PE', 'GR']:
smooth_col = 'smooth_'+col
df[smooth_col] = pd.rolling_mean(df[col], window=25)
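# Note: pd.rolling_mean was removed in later pandas releases; df[col].rolling(window=25).mean()
# is the modern equivalent (the same applies to the smoothing loop over the validation data below).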
df[smooth_col].fillna(method='ffill', inplace=True)
df[smooth_col].fillna(method='bfill', inplace=True)
dfs.append(df)
training_data = pd.concat(dfs)
pe_mean = training_data.PE.mean()
sm_pe_mean = training_data.smooth_PE.mean()
training_data['PE'] = training_data.PE.replace({np.nan:pe_mean})
training_data['smooth_PE'] = training_data['smooth_PE'].replace({np.nan:sm_pe_mean})
formation_encoder = dict(zip(training_data.Formation.unique(), range(len(training_data.Formation.unique()))))
training_data['enc_formation'] = training_data.Formation.map(formation_encoder)
training_data.describe()
#Let's build a model
from sklearn import preprocessing
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics, cross_validation
from classification_utilities import display_cm
#We will take a look at an F1 score for each well
n_estimators=100
learning_rate=.01
random_state=0
facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS',
'WS', 'D','PS', 'BS']
title_length = 20
wells = training_data['Well Name'].unique()
for well in wells:
blind = training_data[training_data['Well Name']==well]
train = training_data[(training_data['Well Name']!=well)]
train_X = train.drop(['Formation', 'Well Name', 'Depth', 'Facies'], axis=1)
train_Y = train.Facies.values
test_X = blind.drop(['Formation', 'Well Name', 'Facies', 'Depth'], axis=1)
test_Y = blind.Facies.values
clf = AdaBoostClassifier(RandomForestClassifier(), n_estimators=200, learning_rate=learning_rate, random_state=random_state, algorithm='SAMME.R')
clf.fit(X=train_X, y=train_Y)
pred_Y = clf.predict(test_X)
f1 = metrics.f1_score(test_Y, pred_Y, average='micro')
print("*"*title_length)
print("{well}={f1:.4f}".format(well=well,f1=f1))
print("*"*title_length)
train_X, test_X, train_Y, test_Y = cross_validation.train_test_split(training_data.drop(['Formation', 'Well Name','Facies', 'Depth'], axis=1), training_data.Facies.values, test_size=.2)
print(train_X.shape)
print(train_Y.shape)
print(test_X.shape)
print(test_Y.shape)
clf = AdaBoostClassifier(RandomForestClassifier(), n_estimators=200, learning_rate=learning_rate, random_state=0, algorithm='SAMME.R')
clf.fit(train_X, train_Y)
pred_Y = clf.predict(test_X)
cm = metrics.confusion_matrix(y_true=test_Y, y_pred=pred_Y)
display_cm(cm, facies_labels, display_metrics=True)
validation_data = pd.read_csv("../validation_data_nofacies.csv")
dfs = []
for well in validation_data['Well Name'].unique():
df = validation_data[validation_data['Well Name']==well].copy(deep=True)
df.sort_values('Depth', inplace=True)
for col in ['PE', 'GR']:
smooth_col = 'smooth_'+col
df[smooth_col] = pd.rolling_mean(df[col], window=25)
df[smooth_col].fillna(method='ffill', inplace=True)
df[smooth_col].fillna(method='bfill', inplace=True)
dfs.append(df)
validation_data = pd.concat(dfs)
validation_data['enc_formation'] = validation_data.Formation.map(formation_encoder)
validation_data.describe()
X = training_data.drop(['Formation', 'Well Name', 'Depth','Facies'], axis=1)
Y = training_data.Facies.values
test_X = validation_data.drop(['Formation', 'Well Name', 'Depth'], axis=1)
clf = AdaBoostClassifier(RandomForestClassifier(), n_estimators=200, learning_rate=learning_rate, random_state=0)
clf.fit(X,Y)
predicted_facies = clf.predict(test_X)
validation_data['Facies'] = predicted_facies
validation_data.to_csv("Kr1m_SEG_ML_Attempt1.csv")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'hammoz-consortium', 'sandbox-2', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Information contained in the data:
Step2: What do we learn from the overall information and the summary statistics?
Step3: Behind every one of these cold records is a real person. Let's first focus on the living people behind the data and look at the stories behind it.
Step4: The first person we find has the full name (the 'Name' field) Astor, Mrs. John Jacob; from the name we can tell that she was the wife of John Jacob.
Step5: Isidor Straus, a shareholder of the American department store Macy's, and his wife paid 221 pounds for a first-class cabin on the Titanic. They boarded at Southampton and their fate was unknown, but we can use Python to find out how the story of this elderly couple ended.
Step6: Let's first see how many people boarded the Titanic together with their families.
Step7: Among the 106 families, the Andersson family had nine members on board; let's see what happened to this family in the disaster.
Step8: The Andersson family included five children in total, one boy and four girls. Sadly, all five children died in the disaster; the youngest was only two years old and the oldest no more than eleven.
|
<ASSISTANT_TASK:>
Python Code:
# Import the packages needed for the data analysis
# Data wrangling
import pandas as pd
from pandas import Series,DataFrame
from collections import Counter
import numpy as np
import re
# Data visualization
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
%matplotlib inline
# Machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
# Read the datasets
titanic_data = pd.read_csv('train.csv')
titanic_test_data = pd.read_csv('test.csv')
titanic_data.head()
# Look at the overall information of the data
titanic_data.info()
# Look at the summary statistics of the data
titanic_data.describe()
fig = plt.figure(figsize=(5,4), dpi=120)
ax = fig.add_subplot(111)
ax = sns.countplot('Survived',data=titanic_data,hue='Sex')
titanic_data = titanic_data.fillna(method='ffill') # fill in missing values
titanic_data.groupby(['Survived','Sex']).count()[['PassengerId']]
fig, axis1 = plt.subplots(1,1,figsize=(18,4))
titanic_data.Age = titanic_data.Age.apply(int)
average_age = titanic_data[["Age", "Survived"]].groupby(['Age'],as_index=False).mean()
ax = sns.barplot(x='Age', y='Survived', data=average_age)
# Helper that looks up passenger records by surname
def find_people(df,lastname):
name_list = []
for index,names in enumerate(df.Name.values):
if lastname in names.split(',')[0]:
name_list.append(df.iloc[index])
return name_list
# Find John Jacob Astor IV
print(find_people(titanic_data,'Astor')) # Astor in the training data
print('--------')
print(find_people(titanic_test_data,'Astor')) # Astor in the test data
# Find Isidor Straus
find_people(titanic_test_data,'Straus')
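# Note: test.csv has no 'Survived' column, so the outcome for the Straus couple cannot be read
# directly from these rows.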
# Find the families aboard the Titanic
def get_family_lastname():
lname_list = []
fn = titanic_data.Name[titanic_data['SibSp'] >= 1] # passengers with at least one sibling or spouse aboard
fl = [name.split(',')[0] for name in fn.values] # surname list for these passengers
fs = set(fl) # de-duplicate the surname list
# confirm which surnames form families in the training data
for lname in fs:
if len(find_people(titanic_data,lname)) >= 2:
lname_list.append(lname)
return lname_list
# Survival status of family members after the disaster
def family_info(family_lastname):
name_list = find_people(titanic_data,family_lastname) # all family members
f_info = []
for i in name_list:
info = {}
info['lname'],info['fname'] = i.Name.split(',')
info['sex'] = i.Sex
info['Age'] = i.Age
info['Pclass'] = i.Pclass
info['Survived'] = i.Survived
f_info.append(info)
family_df = DataFrame(f_info) # build a DataFrame
return family_df
# DataFrames for all families
all_family_df_list = []
for l in get_family_lastname():
all_family_df_list.append(family_info(l))
all_family_df = pd.concat(all_family_df_list)
all_family_df.head()
f_c = len(Counter(all_family_df.lname))
print('There were {} families aboard the Titanic in total'.format(f_c))
family_member_count = all_family_df['fname'].groupby(all_family_df['lname']).count()
# Find the family with the most members on board
family_member_count[family_member_count == family_member_count.max()]
all_family_df[all_family_df.lname == 'Andersson']
r = re.compile(r'Mrs.(.*?)\(') # extract the husband's name with a regular expression
husband_name = [] # list of husbands' names
for l in all_family_df.fname:
n = r.findall(l)
if n:
n = n[0].strip()
husband_name.append(n)
# Rule out passengers who merely share the same name
def husband_and_wife(name):
haw_df = all_family_df[[True if name in n else False for n in all_family_df.fname]]
if len(haw_df) >= 2:
return haw_df
pass
# Determine each family's situation
def faw_situation(name):
faw = husband_and_wife(name)
faw_g_c = faw.groupby('lname').count()
faw_list = faw_g_c[faw_g_c.sex >= 2].index
faw_g_s = faw.groupby('lname').sum()
situation = []
for i in faw_list:
f_c = faw_g_c.loc[i,'Age']
s = faw_g_s.loc[i,'Survived']
if s == 0:
situation.append(('{} {} 夫妇'.format(i,name),'All dead'))
elif s == f_c:
situation.append(('{} {} 夫妇'.format(i,name),'All alive'))
else:
for p_info in find_people(titanic_data,i):
if name in p_info.Name:
if p_info.Survived == 0:
if p_info.Sex == 'female':
situation.append(('{} {} 夫妇'.format(i,name),'wife dead'))
break
else:
situation.append(('{} {} 夫妇'.format(i,name),'husband dead'))
break
if situation:
return situation
faw_situ_list = [] # list of family situations
for n in husband_name:
if husband_and_wife(n) is not None:
if faw_situation(n):
faw_situ_list += faw_situation(n)
faw_situ_list[:5] # show only the first 5 records
f_df = DataFrame(faw_situ_list,columns=['family','Dead or Alive'])
f_df.groupby('Dead or Alive').count()
fig = plt.figure(figsize=(5,4), dpi=120)
ax = fig.add_subplot(111)
ax = sns.countplot('Dead or Alive',data=f_df)
ax.set_title('Who died in the Titanic disaster')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Functional API
Step2: Introduction
Step3: The shape of the data is set as a 784-dimensional vector.
Step4: The inputs that is returned contains information about the shape and dtype
Step5: Here's the dtype
Step6: You create a new node in the graph of layers by calling a layer on this inputs
Step7: The "layer call" action is like drawing an arrow from "inputs" to this layer
Step8: At this point, you can create a Model by specifying its inputs and outputs
Step9: Let's check out what the model summary looks like
Step10: You can also plot the model as a graph
Step11: And, optionally, display the input and output shapes of each layer
Step12: This figure and the code are almost identical. In the code version,
Step13: For further reading, see the training and evaluation guide.
Step14: For details, read the model serialization & saving guide.
Step15: Here, the decoding architecture is strictly symmetrical
Step16: As you can see, the model can be nested
Step17: Manipulate complex graph topologies
Step18: Now plot the model
Step19: When compiling this model, you can assign different losses to each output.
Step20: Since the output layers have different names, you could also specify
Step21: Train the model by passing lists of NumPy arrays of inputs and targets
Step22: When calling fit with a Dataset object, it should yield either a
Step23: Plot the model
Step24: Now train the model
Step25: Shared layers
Step26: Extract and reuse nodes in the graph of layers
Step27: And these are the intermediate activations of the model,
Step28: Use these features to create a new feature-extraction model that returns
Step29: This comes in handy for tasks like
Step30: For serialization support in your custom layer, define a get_config
Step31: Optionally, implement the class method from_config(cls, config) which is used
Step32: You can use any subclassed layer or model in the functional API
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
inputs = keras.Input(shape=(784,))
# Just for demonstration purposes.
img_inputs = keras.Input(shape=(32, 32, 3))
inputs.shape
inputs.dtype
dense = layers.Dense(64, activation="relu")
x = dense(inputs)
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10)(x)
model = keras.Model(inputs=inputs, outputs=outputs, name="mnist_model")
model.summary()
keras.utils.plot_model(model, "my_first_model.png")
keras.utils.plot_model(model, "my_first_model_with_shape_info.png", show_shapes=True)
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype("float32") / 255
x_test = x_test.reshape(10000, 784).astype("float32") / 255
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.RMSprop(),
metrics=["accuracy"],
)
history = model.fit(x_train, y_train, batch_size=64, epochs=2, validation_split=0.2)
test_scores = model.evaluate(x_test, y_test, verbose=2)
print("Test loss:", test_scores[0])
print("Test accuracy:", test_scores[1])
model.save("path_to_my_model")
del model
# Recreate the exact same model purely from the file:
model = keras.models.load_model("path_to_my_model")
encoder_input = keras.Input(shape=(28, 28, 1), name="img")
x = layers.Conv2D(16, 3, activation="relu")(encoder_input)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.Conv2D(16, 3, activation="relu")(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name="encoder")
encoder.summary()
x = layers.Reshape((4, 4, 1))(encoder_output)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu")(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation="relu")(x)
autoencoder = keras.Model(encoder_input, decoder_output, name="autoencoder")
autoencoder.summary()
encoder_input = keras.Input(shape=(28, 28, 1), name="original_img")
x = layers.Conv2D(16, 3, activation="relu")(encoder_input)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.Conv2D(16, 3, activation="relu")(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name="encoder")
encoder.summary()
decoder_input = keras.Input(shape=(16,), name="encoded_img")
x = layers.Reshape((4, 4, 1))(decoder_input)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu")(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation="relu")(x)
decoder = keras.Model(decoder_input, decoder_output, name="decoder")
decoder.summary()
autoencoder_input = keras.Input(shape=(28, 28, 1), name="img")
encoded_img = encoder(autoencoder_input)
decoded_img = decoder(encoded_img)
autoencoder = keras.Model(autoencoder_input, decoded_img, name="autoencoder")
autoencoder.summary()
def get_model():
inputs = keras.Input(shape=(128,))
outputs = layers.Dense(1)(inputs)
return keras.Model(inputs, outputs)
model1 = get_model()
model2 = get_model()
model3 = get_model()
inputs = keras.Input(shape=(128,))
y1 = model1(inputs)
y2 = model2(inputs)
y3 = model3(inputs)
outputs = layers.average([y1, y2, y3])
ensemble_model = keras.Model(inputs=inputs, outputs=outputs)
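# Quick sanity check of the averaging ensemble (illustrative addition, not part of the
# original guide): three 1-unit regressors averaged over a random batch still give shape (N, 1).
dummy_batch = np.random.random((2, 128)).astype("float32")
print(ensemble_model(dummy_batch).shape)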
num_tags = 12 # Number of unique issue tags
num_words = 10000 # Size of vocabulary obtained when preprocessing text data
num_departments = 4 # Number of departments for predictions
title_input = keras.Input(
shape=(None,), name="title"
) # Variable-length sequence of ints
body_input = keras.Input(shape=(None,), name="body") # Variable-length sequence of ints
tags_input = keras.Input(
shape=(num_tags,), name="tags"
) # Binary vectors of size `num_tags`
# Embed each word in the title into a 64-dimensional vector
title_features = layers.Embedding(num_words, 64)(title_input)
# Embed each word in the text into a 64-dimensional vector
body_features = layers.Embedding(num_words, 64)(body_input)
# Reduce sequence of embedded words in the title into a single 128-dimensional vector
title_features = layers.LSTM(128)(title_features)
# Reduce sequence of embedded words in the body into a single 32-dimensional vector
body_features = layers.LSTM(32)(body_features)
# Merge all available features into a single large vector via concatenation
x = layers.concatenate([title_features, body_features, tags_input])
# Stick a logistic regression for priority prediction on top of the features
priority_pred = layers.Dense(1, name="priority")(x)
# Stick a department classifier on top of the features
department_pred = layers.Dense(num_departments, name="department")(x)
# Instantiate an end-to-end model predicting both priority and department
model = keras.Model(
inputs=[title_input, body_input, tags_input],
outputs=[priority_pred, department_pred],
)
keras.utils.plot_model(model, "multi_input_and_output_model.png", show_shapes=True)
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.BinaryCrossentropy(from_logits=True),
keras.losses.CategoricalCrossentropy(from_logits=True),
],
loss_weights=[1.0, 0.2],
)
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"priority": keras.losses.BinaryCrossentropy(from_logits=True),
"department": keras.losses.CategoricalCrossentropy(from_logits=True),
},
loss_weights={"priority": 1.0, "department": 0.2},
)
# Dummy input data
title_data = np.random.randint(num_words, size=(1280, 10))
body_data = np.random.randint(num_words, size=(1280, 100))
tags_data = np.random.randint(2, size=(1280, num_tags)).astype("float32")
# Dummy target data
priority_targets = np.random.random(size=(1280, 1))
dept_targets = np.random.randint(2, size=(1280, num_departments))
model.fit(
{"title": title_data, "body": body_data, "tags": tags_data},
{"priority": priority_targets, "department": dept_targets},
epochs=2,
batch_size=32,
)
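# Note (added): as an alternative to the NumPy dicts above, `fit` also accepts a
# tf.data.Dataset yielding (inputs_dict, targets_dict) tuples with the same keys.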
inputs = keras.Input(shape=(32, 32, 3), name="img")
x = layers.Conv2D(32, 3, activation="relu")(inputs)
x = layers.Conv2D(64, 3, activation="relu")(x)
block_1_output = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_1_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
block_2_output = layers.add([x, block_1_output])
x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_2_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
block_3_output = layers.add([x, block_2_output])
x = layers.Conv2D(64, 3, activation="relu")(block_3_output)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(256, activation="relu")(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(10)(x)
model = keras.Model(inputs, outputs, name="toy_resnet")
model.summary()
keras.utils.plot_model(model, "mini_resnet.png", show_shapes=True)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=["acc"],
)
# We restrict the data to the first 1000 samples so as to limit execution time
# on Colab. Try to train on the entire dataset until convergence!
model.fit(x_train[:1000], y_train[:1000], batch_size=64, epochs=1, validation_split=0.2)
# Embedding for 1000 unique words mapped to 128-dimensional vectors
shared_embedding = layers.Embedding(1000, 128)
# Variable-length sequence of integers
text_input_a = keras.Input(shape=(None,), dtype="int32")
# Variable-length sequence of integers
text_input_b = keras.Input(shape=(None,), dtype="int32")
# Reuse the same layer to encode both inputs
encoded_input_a = shared_embedding(text_input_a)
encoded_input_b = shared_embedding(text_input_b)
vgg19 = tf.keras.applications.VGG19()
features_list = [layer.output for layer in vgg19.layers]
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
img = np.random.random((1, 224, 224, 3)).astype("float32")
extracted_features = feat_extraction_model(img)
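# Added note: the feature-extraction model returns one output per VGG19 layer,
# so `extracted_features` is a list of tensors.
print(len(extracted_features), extracted_features[0].shape)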
class CustomDense(layers.Layer):
def __init__(self, units=32):
super(CustomDense, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
class CustomDense(layers.Layer):
def __init__(self, units=32):
super(CustomDense, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
def get_config(self):
return {"units": self.units}
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
config = model.get_config()
new_model = keras.Model.from_config(config, custom_objects={"CustomDense": CustomDense})
units = 32
timesteps = 10
input_dim = 5
# Define a Functional model
inputs = keras.Input((None, units))
x = layers.GlobalAveragePooling1D()(inputs)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
class CustomRNN(layers.Layer):
def __init__(self):
super(CustomRNN, self).__init__()
self.units = units
self.projection_1 = layers.Dense(units=units, activation="tanh")
self.projection_2 = layers.Dense(units=units, activation="tanh")
# Our previously-defined Functional model
self.classifier = model
def call(self, inputs):
outputs = []
state = tf.zeros(shape=(inputs.shape[0], self.units))
for t in range(inputs.shape[1]):
x = inputs[:, t, :]
h = self.projection_1(x)
y = h + self.projection_2(state)
state = y
outputs.append(y)
features = tf.stack(outputs, axis=1)
print(features.shape)
return self.classifier(features)
rnn_model = CustomRNN()
_ = rnn_model(tf.zeros((1, timesteps, input_dim)))
units = 32
timesteps = 10
input_dim = 5
batch_size = 16
class CustomRNN(layers.Layer):
def __init__(self):
super(CustomRNN, self).__init__()
self.units = units
self.projection_1 = layers.Dense(units=units, activation="tanh")
self.projection_2 = layers.Dense(units=units, activation="tanh")
self.classifier = layers.Dense(1)
def call(self, inputs):
outputs = []
state = tf.zeros(shape=(inputs.shape[0], self.units))
for t in range(inputs.shape[1]):
x = inputs[:, t, :]
h = self.projection_1(x)
y = h + self.projection_2(state)
state = y
outputs.append(y)
features = tf.stack(outputs, axis=1)
return self.classifier(features)
# Note that you specify a static batch size for the inputs with the `batch_shape`
# arg, because the inner computation of `CustomRNN` requires a static batch size
# (when you create the `state` zeros tensor).
inputs = keras.Input(batch_shape=(batch_size, timesteps, input_dim))
x = layers.Conv1D(32, 3)(inputs)
outputs = CustomRNN()(x)
model = keras.Model(inputs, outputs)
rnn_model = CustomRNN()
_ = rnn_model(tf.zeros((1, 10, 5)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The plot below shows the Rosenbrock function in faded black and a blue quadratic approximation to the function about the blue dot.
Step2: We now add a trust region radius as a red dashed line, and show the optimal point (from the blue model) within that radius as a red dot. An actual trust region algorithm doesn't necessarily find the exact optimum (it uses a more efficient, but approximate method to avoid actually solving an internal optimization problem), but for our purposes we will assume that it does. You can change the trust region radius with the slider below. Note that at large radii, the solution is unaffected by the radius and sits at the minimum point of the quadratic. As the trust region radius decreases, the constrained optimum moves and the step direction changes.
|
<ASSISTANT_TASK:>
Python Code:
# Imports assumed by the cells below (added here; they are not shown in this excerpt):
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from ipywidgets import interact

ymin = -4.0
ymax = 8.0
xmin = -4.0
xmax = 6.0
x0 = np.array([2.0, 1.0])
def rosenbrock(xvec):
x = xvec[0]
y = xvec[1]
f = (1.0 - x)**2 + 100.0*(y - x**2)**2
g = np.zeros(2)
g[0] = -2*(1 - x) + 200*(y - x**2)*-2*x
g[1] = 200*(y - x**2)
H = np.zeros((2, 2))
H[0, 0] = 2.0 + 1200.0*x**2 - 400.0*y
H[0, 1] = -400.0*x
H[1, 0] = -400.0*x
H[1, 1] = 200.0
return f, g, H
f, g, H = rosenbrock(x0)
def quad(x):
s = x - x0
fnew = f + np.dot(g, s) + 0.5*np.dot(s, np.dot(H, s))
gnew = g + np.dot(H, s)
return fnew, gnew
n = 200
x = np.linspace(xmin, xmax, n)
y = np.linspace(ymin, ymax, n)
[X, Y] = np.meshgrid(x, y, indexing='ij')
Z = np.zeros((n, n))
Zquad = np.zeros((n, n))
for i in range(n):
for j in range(n):
xvec = np.array([X[i, j], Y[i, j]])
Z[i, j], _, _ = rosenbrock(xvec)
Zquad[i, j], _ = quad(xvec)
# plot rosenbrock
plt.figure()
levels = [1., 10.0, 100.0, 1000.0, 1e4]
plt.contour(X, Y, Z, levels, colors='k', alpha=0.3)
plt.xlabel('x')
plt.ylabel('y')
# plot quadratic model
levels = [1., 10.0, 100.0, 1000.0, 1e4]
plt.contour(X, Y, Zquad, levels, colors='b')
plt.plot(x0[0], x0[1], 'bo')
plt.show()
@interact(R=(0.0,5.0,0.1))
def radius(R=3.5):
# redefine constraint
def con(x):
return [R - np.linalg.norm(x - x0)]
# solve optimization problem
constraints = {'type': 'ineq', 'fun': con}
# options={'disp': True, 'iprint': 2, 'maxiter': 1000}
res = minimize(quad, x0, method='SLSQP', jac=True, constraints=constraints) #, options=options)
# plot rosenbrock
plt.figure()
levels = [1., 10.0, 100.0, 1000.0, 1e4]
plt.contour(X, Y, Z, levels, colors='k', alpha=0.3)
plt.xlabel('x')
plt.ylabel('y')
# plot quadratic model
levels = [1., 10.0, 100.0, 1000.0, 1e4]
plt.contour(X, Y, Zquad, levels, colors='b')
plt.plot(x0[0], x0[1], 'bo')
# plot circle boundary
theta = np.linspace(-np.pi, np.pi, 300)
xt = x0[0] + R*np.cos(theta)
yt = x0[1] + R*np.sin(theta)
plt.plot(xt, yt, 'r--')
# plot optimal point within boundary (using model)
xopt = res.x
plt.plot([x0[0], xopt[0]], [x0[1], xopt[1]], 'r')
plt.plot(xopt[0], xopt[1], 'ro')
# plt.axis('equal')
plt.xlim([-4, 8])
plt.ylim([-4, 6])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the twitter package to interface with the Twitter API
Step2: Example 1. Authorizing an application to access Twitter account data
Step3: Example 2. Retrieving trends
Step4: Look for the WOEID for san-diego
Step5: Example 3. Displaying API responses as pretty-printed JSON
Step6: Example 4. Computing the intersection of two sets of trends
Step7: Example 5. Collecting search results
Step8: Twitter often returns duplicate results, we can filter them out checking for duplicate texts
Step9: Example 6. Extracting text, screen names, and hashtags from tweets
Step10: Example 7. Creating a basic frequency distribution from the words in tweets
Step11: Example 8. Create a prettyprint function to display tuples in a nice tabular format
Step12: Example 9. Finding the most popular retweets
Step13: We can build another prettyprint function to print entire tweets with their retweet count.
|
<ASSISTANT_TASK:>
Python Code:
import pickle
import os
if not os.path.exists('secret_twitter_credentials.pkl'):
Twitter={}
Twitter['Consumer Key'] = ''
Twitter['Consumer Secret'] = ''
Twitter['Access Token'] = ''
Twitter['Access Token Secret'] = ''
with open('secret_twitter_credentials.pkl','wb') as f:
pickle.dump(Twitter, f)
else:
Twitter=pickle.load(open('secret_twitter_credentials.pkl','rb'))
import pip
!pip install twitter
import twitter
auth = twitter.oauth.OAuth(Twitter['Access Token'],
Twitter['Access Token Secret'],
Twitter['Consumer Key'],
Twitter['Consumer Secret'])
twitter_api = twitter.Twitter(auth=auth)
# Nothing to see by displaying twitter_api except that it's now a
# defined variable
print(twitter_api)
WORLD_WOE_ID = 1
US_WOE_ID = 23424977
LOCAL_WOE_ID=2487889
# Prefix ID with the underscore for query string parameterization.
# Without the underscore, the twitter package appends the ID value
# to the URL itself as a special case keyword argument.
world_trends = twitter_api.trends.place(_id=WORLD_WOE_ID)
us_trends = twitter_api.trends.place(_id=US_WOE_ID)
local_trends = twitter_api.trends.place(_id=LOCAL_WOE_ID)
world_trends[:2]
trends=local_trends
print(type(trends))
print(list(trends[0].keys()))
print(trends[0]['trends'])
import json
print((json.dumps(us_trends[:2], indent=1)))
trends_set = {}
trends_set['world'] = set([trend['name']
for trend in world_trends[0]['trends']])
trends_set['us'] = set([trend['name']
for trend in us_trends[0]['trends']])
trends_set['san diego'] = set([trend['name']
for trend in local_trends[0]['trends']])
for loc in ['world','us','san diego']:
print(('-'*10,loc))
print((','.join(trends_set[loc])))
print(( '='*10,'intersection of world and us'))
print((trends_set['world'].intersection(trends_set['us'])))
print(('='*10,'intersection of us and san-diego'))
print((trends_set['san diego'].intersection(trends_set['us'])))
q = '#MTVAwards'
number = 100
# See https://dev.twitter.com/docs/api/1.1/get/search/tweets
search_results = twitter_api.search.tweets(q=q, count=number)
statuses = search_results['statuses']
len(statuses)
print(statuses)
all_text = []
filtered_statuses = []
for s in statuses:
if not s["text"] in all_text:
filtered_statuses.append(s)
all_text.append(s["text"])
statuses = filtered_statuses
len(statuses)
[s['text'] for s in search_results['statuses']]
# Show one sample search result by slicing the list...
print(json.dumps(statuses[0], indent=1))
# The result of the list comprehension is a list with only one element that
# can be accessed by its index and set to the variable t
t = statuses[0]
#[ status for status in statuses
# if status['id'] == 316948241264549888 ][0]
# Explore the variable t to get familiarized with the data structure...
print(t['retweet_count'])
print(t['retweeted'])
status_texts = [ status['text']
for status in statuses ]
screen_names = [ user_mention['screen_name']
for status in statuses
for user_mention in status['entities']['user_mentions'] ]
hashtags = [ hashtag['text']
for status in statuses
for hashtag in status['entities']['hashtags'] ]
# Compute a collection of all words from all tweets
words = [ w
for t in status_texts
for w in t.split() ]
# Explore the first 5 items for each...
print(json.dumps(status_texts[0:5], indent=1))
print(json.dumps(screen_names[0:5], indent=1))
print(json.dumps(hashtags[0:5], indent=1))
print(json.dumps(words[0:5], indent=1))
from collections import Counter
for item in [words, screen_names, hashtags]:
c = Counter(item)
print(c.most_common()[:10]) # top 10
print()
def prettyprint_counts(label, list_of_tuples):
print("\n{:^20} | {:^6}".format(label, "Count"))
print("*"*40)
for k,v in list_of_tuples:
print("{:20} | {:>6}".format(k,v))
for label, data in (('Word', words),
('Screen Name', screen_names),
('Hashtag', hashtags)):
c = Counter(data)
prettyprint_counts(label, c.most_common()[:10])
retweets = [
# Store out a tuple of these three values ...
(status['retweet_count'],
status['retweeted_status']['user']['screen_name'],
status['text'].replace("\n","\\"))
# ... for each status ...
for status in statuses
# ... so long as the status meets this condition.
if 'retweeted_status' in status
]
row_template = "{:^7} | {:^15} | {:50}"
def prettyprint_tweets(list_of_tuples):
print()
print(row_template.format("Count", "Screen Name", "Text"))
print("*"*60)
for count, screen_name, text in list_of_tuples:
print(row_template.format(count, screen_name, text[:50]))
if len(text) > 50:
print(row_template.format("", "", text[50:100]))
if len(text) > 100:
print(row_template.format("", "", text[100:]))
# Slice off the first 5 from the sorted results and display each item in the tuple
prettyprint_tweets(sorted(retweets, reverse=True)[:10])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step9: http
Step13: TRY ON OUR DATA
Step17: http
Step24: Go through and define all the nodes first, then add edges by iterating over each line?
Step28: http
Step47: http
Step59: What is different about this call relative to the one that didn't work before?
|
<ASSISTANT_TASK:>
Python Code:
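# NOTE (added): the cells below assume a `session` object created earlier in the
# original notebook. A minimal, assumed setup with the 1.x Python driver would be:
# from neo4j.v1 import GraphDatabase, basic_auth
# driver = GraphDatabase.driver("bolt://localhost:7687",
#                               auth=basic_auth("neo4j", "<password>"))
# session = driver.session()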
# http://neo4j.com/docs/developer-manual/current/cypher/#query-load-csv
command = """
LOAD CSV WITH HEADERS FROM "https://gist.githubusercontent.com/jexp/d788e117129c3730a042/raw/1bd8c19bf8b49d9eb7149918cc11a34faf996dd8/people.tsv"
AS line
FIELDTERMINATOR '\t'
CREATE (:Artist)
"""
#CREATE (:Artist {company:line[1], year: toInt(line[2])})
result = session.run(command.rstrip())
for record in result:
print(record)
# http://neo4j.com/docs/developer-manual/current/cypher/#query-load-csv
command = """
LOAD CSV WITH HEADERS FROM "https://gist.githubusercontent.com/jexp/d788e117129c3730a042/raw/1bd8c19bf8b49d9eb7149918cc11a34faf996dd8/people.tsv"
AS line
FIELDTERMINATOR '\t'
CREATE (:Artist {company:line['Company Name'], year:line.year})
"""
#CREATE (:Artist {company:line[1], year: toInt(line[2])})
result = session.run(command.rstrip())
for record in result:
print(record)
result = session.run("match(n) return n")
for record in result:
print(record)
# http://neo4j.com/docs/developer-manual/current/cypher/#query-load-csv
# wipe everything first:
session.run("MATCH (n) DETACH DELETE n")
# make new stuff:
command = """
LOAD CSV WITH HEADERS FROM "https://gist.githubusercontent.com/jexp/d788e117129c3730a042/raw/1bd8c19bf8b49d9eb7149918cc11a34faf996dd8/people.tsv"
AS line
FIELDTERMINATOR '\t'
CREATE (:Artist {company:line['Company Name'], year:line.year})
"""
session.run(command.rstrip())
result = session.run("match(n) return n")
for record in result:
print(record)
for record in result:
print(record)
! head /Users/janet/Neo4j_meta4/data/network_broken.tsv
session.run('dbms.security.allow_csv_import_from_file_urls=true')
# TRY ON OUR DATA
# wipe everything first:
session.run("MATCH (n) DETACH DELETE n")
# make new stuff:
command = """
LOAD CSV WITH HEADERS FROM 'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken.tsv'
AS line
FIELDTERMINATOR '\t'
WITH line LIMIT 4
RETURN line
"""
result = session.run(command.rstrip())
for record in result:
print(record)
print('----------------------')
result = session.run("match(n) return n")
for record in result:
print(record)
# Show that I can load the 20-line CSV.
# wipe everything first:
session.run("MATCH (n) DETACH DELETE n")
# make new stuff:
command = """
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken_20.csv'
AS line
RETURN line
"""
result = session.run(command.rstrip())
for record in result:
print(record)
print('----------------------')
result = session.run("match(n) return n")
for record in result:
print(record)
# wipe everything first:
session.run( MATCH (n) DETACH DELETE n )
# make new stuff:
command =
LOAD CSV FROM //WITH HEADERS FROM
'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken_20.csv'
AS line
MERGE (n:A {col1 : line[0], col2: line[1], col3: line[2]})
WITH line, n
MERGE (m:B {ID : line[1]})
WITH m,n
MERGE (n)-[:LIKES]->(m);
result = session.run(command.rstrip())
num_results = 0
for record in result:
num_results += 1
print(record)
print('number of results from building: {}'.format(num_results))
#print('----------------------')
result = session.run( match(n) return n )
num_results = 0
for record in result:
num_results += 1
print(record)
print('number of results from querying: {}'.format(num_results))
# Makes duplicate nodes:
# wipe everything first:
session.run( MATCH (n) DETACH DELETE n )
# make new stuff:
command =
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken_20.csv'
AS line
CREATE (:Gene {source:line.source})
result = session.run(command.rstrip())
# See how many nodes I got.
result = session.run( match(n) return n )
num_results = 0
for record in result:
num_results += 1
print(record)
print('number of nodes added: {}'.format(num_results))
# ------------------------------------------------------------------------
# Makes duplicate nodes:
# wipe everything first:
session.run( MATCH (n) DETACH DELETE n )
# make new stuff:
command =
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken_20.csv'
AS line
CREATE (:Gene {source:line.source})
result = session.run(command.rstrip())
# See how many nodes I got.
result = session.run( match(n) return n )
num_results = 0
for record in result:
num_results += 1
print(record)
print('number of nodes added: {}'.format(num_results))
# ------------------------------------------------------------------------
# Makes 0 nodes, but it was really close.
# wipe everything first:
session.run( MATCH (n) DETACH DELETE n )
# make new stuff:
command =
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken_20.csv'
AS line
MATCH (g:Gene {source:line.source})
MATCH (g2:Gene {target:line.target})
MERGE (g) -[:x]-> (g2) // Parentheses are required to identify nodes in patterns, i.e. (g)
result = session.run(command.rstrip())
# See how many nodes I got.
result = session.run( match(n) return n )
num_results = 0
for record in result:
num_results += 1
print(record)
print('number of nodes added: {}'.format(num_results))
# ------------------------------------------------------------------------
# Makes 4 nodes. This is a good start!!
# wipe everything first:
session.run( MATCH (n) DETACH DELETE n )
# make new stuff:
command =
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken_20.csv'
AS line
MERGE (g:Gene {gene:line.source}) ON CREATE SET
g.organism=line.source_organism
result = session.run(command.rstrip())
# See how many nodes I got.
result = session.run( match(n) return n )
num_results = 0
for record in result:
num_results += 1
print(record)
print('number of nodes added: {}'.format(num_results))
# ------------------------------------------------------------------------
# Aim to make all the nodes.
# wipe everything first:
session.run( MATCH (n) DETACH DELETE n )
# add the source nodes, if they don't already exist:
command =
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken_20.csv'
AS line
MERGE (g:Gene {gene:line.source}) ON CREATE SET
g.organism=line.source_organism
result = session.run(command.rstrip())
# See how many nodes I got.
result = session.run( match(n) return n )
num_results = 0
for record in result:
num_results += 1
print(record)
print('number of nodes added from source column: {}'.format(num_results))
print('-----------------------')
# ------------------------------------------------------------------------
# add the sink nodes, if they don't already exist:
command =
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken_20.csv'
AS line
MERGE (g:Gene {gene:line.target}) ON CREATE SET
g.organism=line.target_organism
result = session.run(command.rstrip())
# See how many nodes I got.
result = session.run( match(n) return n )
num_results = 0
for record in result:
num_results += 1
print(record)
print('number of nodes after adding dest column: {}'.format(num_results))
# WORKS!
# wipe everything first:
session.run( MATCH (n) DETACH DELETE n )
# add the source nodes, if they don't already exist:
command =
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken_20.csv'
AS line
MERGE (g:Gene {gene:line.source}) ON CREATE SET
g.organism=line.source_organism
result = session.run(command.rstrip())
# See how many nodes I got.
result = session.run( match(n) return n )
num_results = 0
for record in result:
num_results += 1
print(record)
print('number of nodes added from source column: {}'.format(num_results))
# ------------------------------------------------------------------------
# add the sink nodes, if they don't already exist:
command =
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken_20.csv'
AS line
MERGE (g:Gene {gene:line.target}) ON CREATE SET
g.organism=line.target_organism
result = session.run(command.rstrip())
# See how many nodes I got.
result = session.run( match(n) return n )
num_results = 0
for record in result:
num_results += 1
print(record)
print('number of nodes after adding dest column: {}'.format(num_results))
# ------------------------------------------------------------------------
# add the edges:
command =
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken_20.csv'
AS line
MATCH (g1:Gene {gene:line.source})
MATCH (g2:Gene {gene:line.target})
MERGE (g1) -[:ABCD]-> (g2);
result = session.run(command.rstrip())
# See how many nodes I got.
result = session.run( match(n) -[e]->(m) return e )
num_results = 0
for record in result:
num_results += 1
print(record)
print('number of nodes added from dest column: {}'.format(num_results))
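# Optional (added suggestion, not in the original notebook): a uniqueness constraint on
# Gene.gene keeps the MERGEs above fast and guarantees no duplicate gene nodes.
# Neo4j 3.x constraint syntax is assumed here:
# session.run("CREATE CONSTRAINT ON (g:Gene) ASSERT g.gene IS UNIQUE")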
# Do it in one Cypher call.
# wipe everything first:
session.run( MATCH (n) DETACH DELETE n )
# add the source nodes, if they don't already exist:
command =
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken_20.csv'
AS line
MERGE (g1:Gene {gene:line.source}) ON CREATE SET
g1.organism=line.source_organism
MERGE (g2:Gene {gene:line.target}) ON CREATE SET
g2.organism=line.target_organism
MERGE (g1) -[:ABCD]-> (g2);
result = session.run(command.rstrip())
# See how many nodes I got.
result = session.run( match(n) return n )
num_results = 0
for record in result:
num_results += 1
print(record)
print('number of nodes added from source and dest column: {}'.format(num_results))
# ------------------------------------------------------------------------
# Fill in more attributes. You can put the attributes in the dict-like thing.
# wipe everything first:
session.run( MATCH (n) DETACH DELETE n )
# add the source nodes, if they don't already exist:
command =
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken_20.csv'
AS line
MERGE (g1:Gene {org_gene:line.source,
organism:line.source_organism,
gene:line.source_gene})
MERGE (g2:Gene {org_gene:line.target}) ON CREATE SET
g2.organism=line.target_organism, g2.gene=line.target_gene
MERGE (g1) -[:ABCD]-> (g2);
result = session.run(command.rstrip())
# See how many nodes I got.
result = session.run( match(n) return n )
num_results = 0
for record in result:
num_results += 1
print(record)
print('number of nodes added from source and dest column: {}'.format(num_results))
# ------------------------------------------------------------------------
# Fill in more attributes. Add in edges with attributes from the line.
# wipe everything first:
session.run( MATCH (n) DETACH DELETE n )
# add the source nodes, if they don't already exist:
command =
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken_20.csv'
AS line
MERGE (g1:Gene {org_gene:line.source,
organism:line.source_organism,
gene:line.source_gene})
MERGE (g2:Gene {org_gene:line.target,
organism:line.target_organism,
gene:line.target_gene})
MERGE (g1) -[:X {weight:line.weight, association:line.association}]-> (g2);
result = session.run(command.rstrip())
# There might be better ways to do this...
# http://stackoverflow.com/questions/29922140/labels-on-nodes-and-relationships-from-a-csv-file
command =
match (n {organism:'Ga0081607'})
set n :organism_A
return n
result = session.run(command.rstrip())
command =
match (n {organism:'Ga0081629'})
set n :organism_B
return n
result = session.run(command.rstrip())
# See how many nodes I got.
result = session.run( match(n) return n )
num_results = 0
for record in result:
num_results += 1
print(record)
print('number of nodes added from source and dest column: {}'.format(num_results))
# ------------------------------------------------------------------------
# Fill in more attributes. Add in edges with attributes from the line.
# wipe everything first:
session.run( MATCH (n) DETACH DELETE n )
# add the source nodes, if they don't already exist:
command =
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/JanetMatsen/Neo4j_meta4/master/data/network_broken_20.csv'
AS line
MERGE (g1:Gene {org_gene:line.source,
organism:line.source_organism,
gene:line.source_gene})
MERGE (g2:Gene {org_gene:line.target,
organism:line.target_organism,
gene:line.target_gene})
MERGE (g1) -[:X {weight:line.weight, association:line.association}]-> (g2);
result = session.run(command.rstrip())
# See how many nodes I got.
result = session.run( match(n) return n )
num_results = 0
for record in result:
num_results += 1
print(record)
print('number of nodes added from source and dest column: {}'.format(num_results))
# -----------------------------------------------------------------------
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mpi-m', 'icon-esm-lr', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
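# Example of the expected call (illustrative values only):
# DOC.set_contributor("Jane Doe", "jane.doe@example.org")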
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From now on, we try a range of estimators and use GridSearchCV to iteratively tune their hyperparameters
|
<ASSISTANT_TASK:>
Python Code:
#Drop quantitative features for which most samples take 0 or 1
for cols in quan:
if train_c[cols].mean() < 0.01 or train_c[cols].mean() > 0.99:
train_c.drop(cols, inplace=True, axis=1)
test_c.drop(cols, inplace=True, axis=1)
#For now we only use the quantitative features left to make predictions
quan_features = train_c.columns[8:-1]
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
import warnings
warnings.filterwarnings('ignore')
from sklearn.linear_model import Ridge
ridge = Ridge()
ridge_cv = GridSearchCV(estimator=ridge, param_grid={'alpha':np.arange(1, 50, 1)}, cv=5)
ridge_cv.fit(train_c[quan_features], train_c.label)
ridge_cv.best_score_
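# (Added) Inspect which regularisation strength the grid search selected:
print(ridge_cv.best_params_)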
from sklearn.linear_model import Lasso
lasso = Lasso()
lasso_cv = GridSearchCV(estimator=lasso, param_grid={'alpha':np.arange(0, 0.05, 0.005)}, cv=5)
lasso_cv.fit(train_c[quan_features], train_c.label)
lasso_cv.best_score_
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor()
params = {'max_depth':np.arange(5,8),
'min_samples_split':np.arange(3, 6)}
rf_cv = GridSearchCV(estimator=rf, param_grid=params, cv=5)
rf_cv.fit(train_c[quan_features], train_c.label)
rf_cv.best_score_
from sklearn.linear_model import ElasticNet
en = ElasticNet()
params = {'alpha':np.arange(0.01, 0.05, 0.005),
'l1_ratio': np.arange(0.1, 0.9, 0.1)}
en_cv = GridSearchCV(estimator=en, param_grid=params, cv=5)
en_cv.fit(train_c[quan_features], train_c.label)
en_cv.best_score_
from mlxtend.regressor import StackingRegressor
from sklearn.linear_model import LinearRegression
lin=LinearRegression()
basic_regressors= [ridge_cv.best_estimator_, lasso_cv.best_estimator_,
rf_cv.best_estimator_, en_cv.best_estimator_]
stacker=StackingRegressor(regressors=basic_regressors, meta_regressor=lin)
stacker.fit(train_c[quan_features], train_c.label)
pred = stacker.predict(train_c[quan_features])
r2_score(train_c.label, pred)
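# (Added) The R^2 above is computed on the training data, so it is optimistic.
# A cross-validated estimate of the stacker is a fairer check:
from sklearn.model_selection import cross_val_score
print(cross_val_score(stacker, train_c[quan_features], train_c.label, cv=5).mean())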
result = pd.DataFrame()
result['ID']=test.ID
result['y']=stacker.predict(test_c[quan_features])
result.to_csv('./stackedprediction.csv', index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What is the average twitter tenure of people who tweeted about the wall?
Step2: There are a couple of users tweeting multiple times, but most tweets come from distinct twitter handles
Step3: Who are the 'top tweeters' + descriptions?
Step4: What is the reach of these tweets in terms of followers?
Step5: Location of the tweets?
|
<ASSISTANT_TASK:>
Python Code:
# Lowercase the hashtags and tweet body
df['hashtags'] = df['hashtags'].str.lower()
df['text'] = df['text'].str.lower()
print("Total number of tweets containing hashtag 'wall' = {}".format(len(df[df['hashtags'].str.contains('wall')])))
print("Total number of tweets whose body contains 'wall' = {}".format(len(df[df['text'].str.contains('wall')])))
wall_tweets = df[(df['hashtags'].str.contains('wall')) | (df['text'].str.contains('wall'))].copy()
print("Total number of tweets about the 'wall' = {}".format(len(wall_tweets)))
def months_between(end, start):
return (end.year - start.year)*12 + end.month - start.month
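# Added example: months_between(datetime(2017, 3, 1), datetime(2016, 12, 15)) -> 3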
wall_tweets['created'] = pd.to_datetime(wall_tweets['created'])
wall_tweets['user_created'] = pd.to_datetime(wall_tweets['user_created'])
wall_tweets['user_tenure'] = wall_tweets[['created', \
'user_created']].apply(lambda row: months_between(row[0], row[1]), axis=1)
tenure_grouping = wall_tweets.groupby('user_tenure').size() / len(wall_tweets) * 100
fig, ax = plt.subplots()
ax.plot(tenure_grouping.index, tenure_grouping.values)
ax.set_ylabel("% of tweets")
ax.set_xlabel("Acct tenure in months")
plt.show()
tweets_per_user = wall_tweets.groupby('user_name').size().sort_values(ascending=False)
fig, ax = plt.subplots()
ax.plot(tweets_per_user.values)
plt.show()
wall_tweets.groupby(['user_name', 'user_description']).size().sort_values(ascending=False).head(20).to_frame()
plt.boxplot(wall_tweets['friends_count'].values, vert=False)
plt.show()
wall_tweets['friends_count'].describe()
wall_tweets.groupby('user_location').size().sort_values(ascending=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Content and structure of the data
Step2: Data processing
Step3: Note
Step4: Log transformation of the target variable
Step5: Feature engineering
Step6: Missing data
Step7: We impute the missing values according to each case
Step8: Similar cases.
Step9: LotFrontage
Step10: GarageYrBlt, GarageArea and GarageCars
Step11: BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, BsmtFullBath, BsmtHalfBath
Step12: BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1, BsmtFinType2
Step13: MasVnrArea and MasVnrType
Step14: MSZoning (general zoning classification)
Step15: Utilities
Step16: Functional
Step17: Electrical
Step18: KitchenQual
Step19: Exterior1st and Exterior2nd
Step20: SaleType
Step21: MSSubClass
Step22: Are there still missing values?
Step23: There are no missing values left.
Step24: Encode the nominal values of some categorical variables as numbers, for better performance
Step25: We add a very important feature
Step26: Skewed features
Step27: Box-Cox transformation of (highly) skewed features.
Step28: Dummy variables
Step29: New training and test datasets.
Step30: Data correlation
Step31: Modeling
Step32: Cross-validation strategy
Step33: Models
Step34: LASSO regression
Step35: Elastic Net regression
Step36: Kernel Ridge regression
Step37: Model scores
|
<ASSISTANT_TASK:>
Python Code:
# Librerías
import pandas as pd
import numpy as np
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, RidgeCV, LassoCV, ElasticNetCV
from sklearn.metrics import mean_squared_error, make_scorer
from scipy import stats
from scipy.stats import norm, skew
from IPython.display import display
import matplotlib.pyplot as plt
import seaborn as sns
# Opciones
pd.set_option('display.float_format', lambda x: '%.3f' % x)
%matplotlib inline
# Para verificar los archivos contenidos en el directorio
from subprocess import check_output
print(check_output(["ls", "../python_regression/"]).decode("utf8"))
# Para leer los datos
train = pd.read_csv("train.csv")
print("train : " + str(train.shape))
test = pd.read_csv('test.csv')
print("test : " + str(test.shape))
## Despliega las primeras cinco filas del conjunto de datos de entrenamiento
train.head(5)
## Describe los datos de entrenamiento
train.describe()
# Verifica el número de observaciones y atributos
print("The train data size before dropping Id feature is : {} ".format(train.shape))
print("The test data size before dropping Id feature is : {} ".format(test.shape))
# Guarda la columna 'Id'
train_ID = train['Id']
test_ID = test['Id']
# Ahora prescinde de la columna 'Id', ya que es innecesaria para el proceso de predicción
train.drop("Id", axis = 1, inplace = True)
test.drop("Id", axis = 1, inplace = True)
# Verifica de nuevo el tamaño de los datos después de eliminar la variable 'Id'
print("\nThe train data size after dropping Id feature is : {} ".format(train.shape))
print("The test data size after dropping Id feature is : {} ".format(test.shape))
fig, ax = plt.subplots()
ax.scatter(x = train['GrLivArea'], y = train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
# Eliminamos los outliers
train = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index)
# Verificamos de nuevo el gráfico
fig, ax = plt.subplots()
ax.scatter(train['GrLivArea'], train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
sns.distplot(train['SalePrice'] , fit=norm);
# Obtenemos los parámetros de ajuste utilizados por la función
(mu, sigma) = norm.fit(train['SalePrice'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# Graficamos la distribución empírica
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')
# QQ-plot para comprobar la normalidad
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
plt.show()
# Empleamos la función log1p de numpy para obtener el log(1+x) de todos los elementos de la variable objetivo
train["SalePrice"] = np.log1p(train["SalePrice"])
# Verificamos la nueva distribución empírica
sns.distplot(train['SalePrice'] , fit=norm);
# Nuevos parámetros
(mu, sigma) = norm.fit(train['SalePrice'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# Nuevo gráfico después de la transformación
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')
# QQ-plot después de la transformación
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
plt.show()
# Registramos la nueva variable target como y_train
y_train = train.SalePrice.values
ntrain = train.shape[0]
ntest = test.shape[0]
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
print("all_data size is : {}".format(all_data.shape))
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
missing_data.head(30)
f, ax = plt.subplots(figsize=(15, 12))
plt.xticks(rotation='0')
sns.barplot(x=all_data_na, y=all_data_na.index)
plt.xlabel('Features', fontsize=15)
plt.ylabel('Percent of missing values', fontsize=15)
plt.title('Percent missing data by feature', fontsize=15)
all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
all_data["Alley"] = all_data["Alley"].fillna("None")
all_data["Fence"] = all_data["Fence"].fillna("None")
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
all_data[col] = all_data[col].fillna('None')
# Agrupamos por vecindario y completamos los valores perdidos con la mediana de LotFrontage para todos los vecindarios
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(
lambda x: x.fillna(x.median()))
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
all_data[col] = all_data[col].fillna(0)
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
all_data[col] = all_data[col].fillna(0)
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[col] = all_data[col].fillna('None')
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)
all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0])
all_data = all_data.drop(['Utilities'], axis=1)
all_data["Functional"] = all_data["Functional"].fillna("Typ")
all_data['Electrical'] = all_data['Electrical'].fillna(all_data['Electrical'].mode()[0])
all_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode()[0])
all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode()[0])
all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode()[0])
all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode()[0])
all_data['MSSubClass'] = all_data['MSSubClass'].fillna("None")
# Verificamos si permanecen aún valores perdidos
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
missing_data.head()
# MSSubClass = El tipo de edificios
all_data['MSSubClass'] = all_data['MSSubClass'].apply(str)
# OverallCond
all_data['OverallCond'] = all_data['OverallCond'].astype(str)
# Año y mes de venta.
all_data['YrSold'] = all_data['YrSold'].astype(str)
all_data['MoSold'] = all_data['MoSold'].astype(str)
from sklearn.preprocessing import LabelEncoder
cols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1',
'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',
'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond',
'YrSold', 'MoSold')
# procesa columnas, applicando LabelEncoder a los atributos categóricos
for c in cols:
lbl = LabelEncoder()
lbl.fit(list(all_data[c].values))
all_data[c] = lbl.transform(list(all_data[c].values))
# Shape
print('Shape all_data: {}'.format(all_data.shape))
# Adicionamos el total de pies cuadrados (TotalSF) de la vivienda
all_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index
# Verificamos el sesgo de todos los atributos numéricos
skewed_feats = all_data[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features: \n")
skewness = pd.DataFrame({'Skew' :skewed_feats})
skewness.head(20)
skewness = skewness[abs(skewness) > 0.75]
print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))
from scipy.special import boxcox1p
skewed_features = skewness.index
lam = 0.15
for feat in skewed_features:
#all_data[feat] += 1
all_data[feat] = boxcox1p(all_data[feat], lam)
# all_data[skewed_features] = np.log1p(all_data[skewed_features])
all_data = pd.get_dummies(all_data)
print(all_data.shape)
train = all_data[:ntrain]
test = all_data[ntrain:]
# Mapa para detectar qué atributos están correlacionados con el precio de venta
corrmat = train.corr()
plt.subplots(figsize=(12,9))
sns.heatmap(corrmat, vmax=0.9, square=True)
from sklearn.linear_model import LinearRegression, ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
# Función de validación cruzada con mezcla aleatoria previa
n_folds = 5
def rmsle_cv(model):
    # Pass the KFold object itself so shuffle/random_state actually take effect;
    # calling .get_n_splits() would reduce it to a plain integer and drop the shuffling.
    kf = KFold(n_folds, shuffle=True, random_state=42)
    rmse = np.sqrt(-cross_val_score(model, train.values, y_train, scoring="neg_mean_squared_error", cv=kf))
    return rmse
REG = make_pipeline(RobustScaler(), LinearRegression())
LASSO = make_pipeline(RobustScaler(), Lasso(alpha =0.0005, random_state=1))
ENet = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3))
KRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
score = rmsle_cv(REG)
print("Linear regression score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(LASSO)
print("Lasso score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(ENet)
print("ElasticNet score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(KRR)
print("Kernel Ridge score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
# Ajustamos el modelo con el mejor score a los datos y predecimos los valores
LASSO.fit(train, y_train)
y_pred = LASSO.predict(test)
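# Added sketch: SalePrice was modelled as log1p(price), so map predictions back with expm1
# before writing a submission (test_ID was saved near the top of the notebook).
final_prices = np.expm1(y_pred)
submission = pd.DataFrame({'Id': test_ID, 'SalePrice': final_prices})
submission.head()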
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Korean text processing utilities
Step2: Morphological analysis
Step3: Noun extraction
Step4: Morpheme extraction
Step5: Part-of-speech tagging
|
<ASSISTANT_TASK:>
Python Code:
from konlpy.corpus import kolaw
kolaw.fileids()
c = kolaw.open('constitution.txt').read()
print(c[:100])
from konlpy.corpus import kobill
kobill.fileids()
d = kobill.open('1809890.txt').read()
print(d[:100])
x = [u"한글", {u"한글 키": [u"한글 밸류1", u"한글 밸류2"]}]
print(x)
from konlpy.utils import pprint
pprint(x)
from konlpy.utils import concordance
idx = concordance(u'대한민국', c, show=True)
idx
from konlpy.tag import *
hannanum = Hannanum()
kkma = Kkma()
twitter = Twitter()
pprint(hannanum.nouns(c[:65]))
pprint(kkma.nouns(c[:65]))
pprint(twitter.nouns(c[:65]))
pprint(hannanum.morphs(c[:65]))
pprint(kkma.morphs(c[:65]))
pprint(twitter.morphs(c[:65]))
pprint(hannanum.pos(c[:65]))
pprint(kkma.pos(c[:65]))
pprint(twitter.pos(c[:65]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Perplexity on Each Dataset
Step2: Loss vs. Epoch
Step3: Perplexity vs. Epoch
Step4: Generations
Step5: BLEU Analysis
Step6: N-pairs BLEU Analysis
Step7: Alignment Analysis
|
<ASSISTANT_TASK:>
Python Code:
report_file = '/Users/bking/IdeaProjects/LanguageModelRNN/experiment_results/encdec_noing23_bow_200_512_04drb/encdec_noing23_bow_200_512_04drb.json'
log_file = '/Users/bking/IdeaProjects/LanguageModelRNN/experiment_results/encdec_noing23_bow_200_512_04drb/encdec_noing23_bow_200_512_04drb_logs.json'
import json
import matplotlib.pyplot as plt
with open(report_file) as f:
report = json.loads(f.read())
with open(log_file) as f:
logs = json.loads(f.read())
print('Encoder: \n\n', report['architecture']['encoder'])
print('Decoder: \n\n', report['architecture']['decoder'])
print('Train Perplexity: ', report['train_perplexity'])
print('Valid Perplexity: ', report['valid_perplexity'])
print('Test Perplexity: ', report['test_perplexity'])
%matplotlib inline
for k in logs.keys():
plt.plot(logs[k][0], logs[k][1], label=str(k) + ' (train)')
plt.plot(logs[k][0], logs[k][2], label=str(k) + ' (valid)')
plt.title('Loss v. Epoch')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
%matplotlib inline
for k in logs.keys():
plt.plot(logs[k][0], logs[k][3], label=str(k) + ' (train)')
plt.plot(logs[k][0], logs[k][4], label=str(k) + ' (valid)')
plt.title('Perplexity v. Epoch')
plt.xlabel('Epoch')
plt.ylabel('Perplexity')
plt.legend()
plt.show()
def print_sample(sample, best_bleu=None):
enc_input = ' '.join([w for w in sample['encoder_input'].split(' ') if w != '<pad>'])
gold = ' '.join([w for w in sample['gold'].split(' ') if w != '<mask>'])
print('Input: '+ enc_input + '\n')
print('Gend: ' + sample['generated'] + '\n')
print('True: ' + gold + '\n')
if best_bleu is not None:
cbm = ' '.join([w for w in best_bleu['best_match'].split(' ') if w != '<mask>'])
print('Closest BLEU Match: ' + cbm + '\n')
print('Closest BLEU Score: ' + str(best_bleu['best_score']) + '\n')
print('\n')
for i, sample in enumerate(report['train_samples']):
print_sample(sample, report['best_bleu_matches_train'][i] if 'best_bleu_matches_train' in report else None)
for i, sample in enumerate(report['valid_samples']):
print_sample(sample, report['best_bleu_matches_valid'][i] if 'best_bleu_matches_valid' in report else None)
for i, sample in enumerate(report['test_samples']):
print_sample(sample, report['best_bleu_matches_test'][i] if 'best_bleu_matches_test' in report else None)
def print_bleu(bleu_struct):
    print('Overall Score: ', bleu_struct['score'], '\n')
    print('1-gram Score: ', bleu_struct['components']['1'])
    print('2-gram Score: ', bleu_struct['components']['2'])
    print('3-gram Score: ', bleu_struct['components']['3'])
    print('4-gram Score: ', bleu_struct['components']['4'])
# Training Set BLEU Scores
print_bleu(report['train_bleu'])
# Validation Set BLEU Scores
print_bleu(report['valid_bleu'])
# Test Set BLEU Scores
print_bleu(report['test_bleu'])
# All Data BLEU Scores
print_bleu(report['combined_bleu'])
# Training Set BLEU n-pairs Scores
print_bleu(report['n_pairs_bleu_train'])
# Validation Set n-pairs BLEU Scores
print_bleu(report['n_pairs_bleu_valid'])
# Test Set n-pairs BLEU Scores
print_bleu(report['n_pairs_bleu_test'])
# Combined n-pairs BLEU Scores
print_bleu(report['n_pairs_bleu_all'])
# Ground Truth n-pairs BLEU Scores
print_bleu(report['n_pairs_bleu_gold'])
print('Average (Train) Generated Score: ', report['average_alignment_train'])
print('Average (Valid) Generated Score: ', report['average_alignment_valid'])
print('Average (Test) Generated Score: ', report['average_alignment_test'])
print('Average (All) Generated Score: ', report['average_alignment_all'])
print('Average Gold Score: ', report['average_alignment_gold'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Global variables are shared between cells. Try executing the cell below
Step2: Keyboard Shortcuts
Step3: Basics of Python
Step4: Basic data types
Step5: Note that unlike many languages, Python does not have unary increment (x++) or decrement (x--) operators.
Step6: Now let's look at the operations
Step7: Strings
Step8: String objects have a bunch of useful methods; for example
Step9: You can find a list of all string methods in the documentation.
Step10: As usual, you can find all the gory details about lists in the documentation.
Step11: Loops
Step12: If you want access to the index of each element within the body of a loop, use the built-in enumerate function
Step13: List comprehensions
Step14: You can make this code simpler using a list comprehension
Step15: List comprehensions can also contain conditions
Step16: Dictionaries
Step17: You can find all you need to know about dictionaries in the documentation.
Step18: Dictionary comprehensions
Step19: Sets
Step20: Loops
Step21: Set comprehensions
Step22: Tuples
Step23: Functions
Step24: We will often define functions to take optional keyword arguments, like this
Step25: Classes
Step26: Numpy
Step27: Arrays
Step28: Numpy also provides many functions to create arrays
Step29: Array indexing
Step30: A slice of an array is a view into the same data, so modifying it will modify the original array.
Step31: You can also mix integer indexing with slice indexing. However, doing so will yield an array of lower rank than the original array. Note that this is quite different from the way that MATLAB handles array slicing
Step32: Two ways of accessing the data in the middle row of the array.
Step33: Integer array indexing
Step34: One useful trick with integer array indexing is selecting or mutating one element from each row of a matrix
Step35: Boolean array indexing
Step36: For brevity we have left out a lot of details about numpy array indexing; if you want to know more you should read the documentation.
Step37: You can read all about numpy datatypes in the documentation.
Step38: Note that unlike MATLAB, * is elementwise multiplication, not matrix multiplication. We instead use the dot function to compute inner products of vectors, to multiply a vector by a matrix, and to multiply matrices. dot is available both as a function in the numpy module and as an instance method of array objects
Step39: You can also use the @ operator which is equivalent to numpy's dot operator.
Step40: Numpy provides many useful functions for performing computations on arrays; one of the most useful is sum
Step41: You can find the full list of mathematical functions provided by numpy in the documentation.
Step42: Broadcasting
Step43: This works; however when the matrix x is very large, computing an explicit loop in Python could be slow. Note that adding the vector v to each row of the matrix x is equivalent to forming a matrix vv by stacking multiple copies of v vertically, then performing elementwise summation of x and vv. We could implement this approach like this
Step44: Numpy broadcasting allows us to perform this computation without actually creating multiple copies of v. Consider this version, using broadcasting
Step45: The line y = x + v works even though x has shape (4, 3) and v has shape (3,) due to broadcasting; this line works as if v actually had shape (4, 3), where each row was a copy of v, and the sum was performed elementwise.
Step46: Broadcasting typically makes your code more concise and faster, so you should strive to use it where possible.
Step47: By running this special iPython command, we will be displaying plots inline
Step48: Plotting
Step49: With just a little bit of extra work we can easily plot multiple lines at once, and add a title, legend, and axis labels
Step50: Subplots
|
<ASSISTANT_TASK:>
Python Code:
x = 1
print(x)
y = 2 * x
print(y)
!python --version
def quicksort(arr):
if len(arr) <= 1:
return arr
pivot = arr[len(arr) // 2]
left = [x for x in arr if x < pivot]
middle = [x for x in arr if x == pivot]
right = [x for x in arr if x > pivot]
return quicksort(left) + middle + quicksort(right)
print(quicksort([3,6,8,10,1,2,1]))
x = 3
print(x, type(x))
print(x + 1) # Addition
print(x - 1) # Subtraction
print(x * 2) # Multiplication
print(x ** 2) # Exponentiation
x += 1
print(x)
x *= 2
print(x)
y = 2.5
print(type(y))
print(y, y + 1, y * 2, y ** 2)
t, f = True, False
print(type(t))
print(t and f) # Logical AND;
print(t or f) # Logical OR;
print(not t) # Logical NOT;
print(t != f) # Logical XOR;
hello = 'hello' # String literals can use single quotes
world = "world" # or double quotes; it does not matter
print(hello, len(hello))
hw = hello + ' ' + world # String concatenation
print(hw)
hw12 = '{} {} {}'.format(hello, world, 12) # string formatting
print(hw12)
s = "hello"
print(s.capitalize()) # Capitalize a string
print(s.upper()) # Convert a string to uppercase; prints "HELLO"
print(s.rjust(7)) # Right-justify a string, padding with spaces
print(s.center(7)) # Center a string, padding with spaces
print(s.replace('l', '(ell)')) # Replace all instances of one substring with another
print(' world '.strip()) # Strip leading and trailing whitespace
xs = [3, 1, 2] # Create a list
print(xs, xs[2])
print(xs[-1]) # Negative indices count from the end of the list; prints "2"
xs[2] = 'foo' # Lists can contain elements of different types
print(xs)
xs.append('bar') # Add a new element to the end of the list
print(xs)
x = xs.pop() # Remove and return the last element of the list
print(x, xs)
nums = list(range(5)) # range is a built-in function that creates a list of integers
print(nums) # Prints "[0, 1, 2, 3, 4]"
print(nums[2:4]) # Get a slice from index 2 to 4 (exclusive); prints "[2, 3]"
print(nums[2:]) # Get a slice from index 2 to the end; prints "[2, 3, 4]"
print(nums[:2]) # Get a slice from the start to index 2 (exclusive); prints "[0, 1]"
print(nums[:])      # Get a slice of the whole list; prints "[0, 1, 2, 3, 4]"
print(nums[:-1])    # Slice indices can be negative; prints "[0, 1, 2, 3]"
nums[2:4] = [8, 9] # Assign a new sublist to a slice
print(nums) # Prints "[0, 1, 8, 9, 4]"
animals = ['cat', 'dog', 'monkey']
for animal in animals:
print(animal)
animals = ['cat', 'dog', 'monkey']
for idx, animal in enumerate(animals):
print('#{}: {}'.format(idx + 1, animal))
nums = [0, 1, 2, 3, 4]
squares = []
for x in nums:
squares.append(x ** 2)
print(squares)
nums = [0, 1, 2, 3, 4]
squares = [x ** 2 for x in nums]
print(squares)
nums = [0, 1, 2, 3, 4]
even_squares = [x ** 2 for x in nums if x % 2 == 0]
print(even_squares)
d = {'cat': 'cute', 'dog': 'furry'} # Create a new dictionary with some data
print(d['cat']) # Get an entry from a dictionary; prints "cute"
print('cat' in d) # Check if a dictionary has a given key; prints "True"
d['fish'] = 'wet' # Set an entry in a dictionary
print(d['fish']) # Prints "wet"
print(d['monkey']) # KeyError: 'monkey' not a key of d
print(d.get('monkey', 'N/A')) # Get an element with a default; prints "N/A"
print(d.get('fish', 'N/A')) # Get an element with a default; prints "wet"
del d['fish'] # Remove an element from a dictionary
print(d.get('fish', 'N/A')) # "fish" is no longer a key; prints "N/A"
d = {'person': 2, 'cat': 4, 'spider': 8}
for animal, legs in d.items():
print('A {} has {} legs'.format(animal, legs))
nums = [0, 1, 2, 3, 4]
even_num_to_square = {x: x ** 2 for x in nums if x % 2 == 0}
print(even_num_to_square)
animals = {'cat', 'dog'}
print('cat' in animals) # Check if an element is in a set; prints "True"
print('fish' in animals) # prints "False"
animals.add('fish') # Add an element to a set
print('fish' in animals)
print(len(animals)) # Number of elements in a set;
animals.add('cat') # Adding an element that is already in the set does nothing
print(len(animals))
animals.remove('cat') # Remove an element from a set
print(len(animals))
animals = {'cat', 'dog', 'fish'}
for idx, animal in enumerate(animals):
print('#{}: {}'.format(idx + 1, animal))
from math import sqrt
print({int(sqrt(x)) for x in range(30)})
d = {(x, x + 1): x for x in range(10)} # Create a dictionary with tuple keys
t = (5, 6) # Create a tuple
print(type(t))
print(d[t])
print(d[(1, 2)])
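# Tuples are immutable, so the assignment below raises a TypeError (shown on purpose):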
t[0] = 1
def sign(x):
if x > 0:
return 'positive'
elif x < 0:
return 'negative'
else:
return 'zero'
for x in [-1, 0, 1]:
print(sign(x))
def hello(name, loud=False):
if loud:
print('HELLO, {}'.format(name.upper()))
else:
print('Hello, {}!'.format(name))
hello('Bob')
hello('Fred', loud=True)
class Greeter:
# Constructor
def __init__(self, name):
self.name = name # Create an instance variable
# Instance method
def greet(self, loud=False):
if loud:
print('HELLO, {}'.format(self.name.upper()))
else:
print('Hello, {}!'.format(self.name))
g = Greeter('Fred') # Construct an instance of the Greeter class
g.greet() # Call an instance method; prints "Hello, Fred"
g.greet(loud=True) # Call an instance method; prints "HELLO, FRED!"
import numpy as np
a = np.array([1, 2, 3]) # Create a rank 1 array
print(type(a), a.shape, a[0], a[1], a[2])
a[0] = 5 # Change an element of the array
print(a)
b = np.array([[1,2,3],[4,5,6]]) # Create a rank 2 array
print(b)
print(b.shape)
print(b[0, 0], b[0, 1], b[1, 0])
a = np.zeros((2,2)) # Create an array of all zeros
print(a)
b = np.ones((1,2)) # Create an array of all ones
print(b)
c = np.full((2,2), 7) # Create a constant array
print(c)
d = np.eye(2) # Create a 2x2 identity matrix
print(d)
e = np.random.random((2,2)) # Create an array filled with random values
print(e)
import numpy as np
# Create the following rank 2 array with shape (3, 4)
# [[ 1 2 3 4]
# [ 5 6 7 8]
# [ 9 10 11 12]]
a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
# Use slicing to pull out the subarray consisting of the first 2 rows
# and columns 1 and 2; b is the following array of shape (2, 2):
# [[2 3]
# [6 7]]
b = a[:2, 1:3]
print(b)
print(a[0, 1])
b[0, 0] = 77 # b[0, 0] is the same piece of data as a[0, 1]
print(a[0, 1])
# Create the following rank 2 array with shape (3, 4)
a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(a)
row_r1 = a[1, :] # Rank 1 view of the second row of a
row_r2 = a[1:2, :] # Rank 2 view of the second row of a
row_r3 = a[[1], :] # Rank 2 view of the second row of a
print(row_r1, row_r1.shape)
print(row_r2, row_r2.shape)
print(row_r3, row_r3.shape)
# We can make the same distinction when accessing columns of an array:
col_r1 = a[:, 1]
col_r2 = a[:, 1:2]
print(col_r1, col_r1.shape)
print()
print(col_r2, col_r2.shape)
a = np.array([[1,2], [3, 4], [5, 6]])
# An example of integer array indexing.
# The returned array will have shape (3,) and
print(a[[0, 1, 2], [0, 1, 0]])
# The above example of integer array indexing is equivalent to this:
print(np.array([a[0, 0], a[1, 1], a[2, 0]]))
# When using integer array indexing, you can reuse the same
# element from the source array:
print(a[[0, 0], [1, 1]])
# Equivalent to the previous integer array indexing example
print(np.array([a[0, 1], a[0, 1]]))
# Create a new array from which we will select elements
a = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
print(a)
# Create an array of indices
b = np.array([0, 2, 0, 1])
# Select one element from each row of a using the indices in b
print(a[np.arange(4), b]) # Prints "[ 1 6 7 11]"
# Mutate one element from each row of a using the indices in b
a[np.arange(4), b] += 10
print(a)
import numpy as np
a = np.array([[1,2], [3, 4], [5, 6]])
bool_idx = (a > 2) # Find the elements of a that are bigger than 2;
# this returns a numpy array of Booleans of the same
# shape as a, where each slot of bool_idx tells
# whether that element of a is > 2.
print(bool_idx)
# We use boolean array indexing to construct a rank 1 array
# consisting of the elements of a corresponding to the True values
# of bool_idx
print(a[bool_idx])
# We can do all of the above in a single concise statement:
print(a[a > 2])
x = np.array([1, 2]) # Let numpy choose the datatype
y = np.array([1.0, 2.0]) # Let numpy choose the datatype
z = np.array([1, 2], dtype=np.int64) # Force a particular datatype
print(x.dtype, y.dtype, z.dtype)
x = np.array([[1,2],[3,4]], dtype=np.float64)
y = np.array([[5,6],[7,8]], dtype=np.float64)
# Elementwise sum; both produce the array
print(x + y)
print(np.add(x, y))
# Elementwise difference; both produce the array
print(x - y)
print(np.subtract(x, y))
# Elementwise product; both produce the array
print(x * y)
print(np.multiply(x, y))
# Elementwise division; both produce the array
# [[ 0.2 0.33333333]
# [ 0.42857143 0.5 ]]
print(x / y)
print(np.divide(x, y))
# Elementwise square root; produces the array
# [[ 1. 1.41421356]
# [ 1.73205081 2. ]]
print(np.sqrt(x))
x = np.array([[1,2],[3,4]])
y = np.array([[5,6],[7,8]])
v = np.array([9,10])
w = np.array([11, 12])
# Inner product of vectors; both produce 219
print(v.dot(w))
print(np.dot(v, w))
print(v @ w)
# Matrix / vector product; both produce the rank 1 array [29 67]
print(x.dot(v))
print(np.dot(x, v))
print(x @ v)
# Matrix / matrix product; both produce the rank 2 array
# [[19 22]
# [43 50]]
print(x.dot(y))
print(np.dot(x, y))
print(x @ y)
x = np.array([[1,2],[3,4]])
print(np.sum(x)) # Compute sum of all elements; prints "10"
print(np.sum(x, axis=0)) # Compute sum of each column; prints "[4 6]"
print(np.sum(x, axis=1)) # Compute sum of each row; prints "[3 7]"
print(x)
print("transpose\n", x.T)
v = np.array([[1,2,3]])
print(v )
print("transpose\n", v.T)
# We will add the vector v to each row of the matrix x,
# storing the result in the matrix y
x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
v = np.array([1, 0, 1])
y = np.empty_like(x) # Create an empty matrix with the same shape as x
# Add the vector v to each row of the matrix x with an explicit loop
for i in range(4):
y[i, :] = x[i, :] + v
print(y)
vv = np.tile(v, (4, 1)) # Stack 4 copies of v on top of each other
print(vv) # Prints "[[1 0 1]
# [1 0 1]
# [1 0 1]
# [1 0 1]]"
y = x + vv # Add x and vv elementwise
print(y)
import numpy as np
# We will add the vector v to each row of the matrix x,
# storing the result in the matrix y
x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
v = np.array([1, 0, 1])
y = x + v # Add v to each row of x using broadcasting
print(y)
# Compute outer product of vectors
v = np.array([1,2,3]) # v has shape (3,)
w = np.array([4,5]) # w has shape (2,)
# To compute an outer product, we first reshape v to be a column
# vector of shape (3, 1); we can then broadcast it against w to yield
# an output of shape (3, 2), which is the outer product of v and w:
print(np.reshape(v, (3, 1)) * w)
# Add a vector to each row of a matrix
x = np.array([[1,2,3], [4,5,6]])
# x has shape (2, 3) and v has shape (3,) so they broadcast to (2, 3),
# giving the following matrix:
print(x + v)
# Add a vector to each column of a matrix
# x has shape (2, 3) and w has shape (2,).
# If we transpose x then it has shape (3, 2) and can be broadcast
# against w to yield a result of shape (3, 2); transposing this result
# yields the final result of shape (2, 3) which is the matrix x with
# the vector w added to each column. Gives the following matrix:
print((x.T + w).T)
# Another solution is to reshape w to be a row vector of shape (2, 1);
# we can then broadcast it directly against x to produce the same
# output.
print(x + np.reshape(w, (2, 1)))
# Multiply a matrix by a constant:
# x has shape (2, 3). Numpy treats scalars as arrays of shape ();
# these can be broadcast together to shape (2, 3), producing the
# following array:
print(x * 2)
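# Added sketch: a quick way to check the shape two arrays will broadcast to.
print(np.broadcast(np.empty((4, 3)), np.empty(3)).shape)  # (4, 3)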
import matplotlib.pyplot as plt
%matplotlib inline
# Compute the x and y coordinates for points on a sine curve
x = np.arange(0, 3 * np.pi, 0.1)
y = np.sin(x)
# Plot the points using matplotlib
plt.plot(x, y)
y_sin = np.sin(x)
y_cos = np.cos(x)
# Plot the points using matplotlib
plt.plot(x, y_sin)
plt.plot(x, y_cos)
plt.xlabel('x axis label')
plt.ylabel('y axis label')
plt.title('Sine and Cosine')
plt.legend(['Sine', 'Cosine'])
# Compute the x and y coordinates for points on sine and cosine curves
x = np.arange(0, 3 * np.pi, 0.1)
y_sin = np.sin(x)
y_cos = np.cos(x)
# Set up a subplot grid that has height 2 and width 1,
# and set the first such subplot as active.
plt.subplot(2, 1, 1)
# Make the first plot
plt.plot(x, y_sin)
plt.title('Sine')
# Set the second subplot as active, and make the second plot.
plt.subplot(2, 1, 2)
plt.plot(x, y_cos)
plt.title('Cosine')
# Show the figure.
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This version uses prior distributions to do all the work. H and h are both informative priors that then drive the solution to the right answer.
Step2: Example 2
Step3: Example 3
Step4: Example 4
Step5: Example 5
Step6: Acceleration due to graving example
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pymc3 as pm
import seaborn as sns
import arviz as ar
sns.set(font_scale=1.5)
%matplotlib inline
with pm.Model() as model:
H = pm.Normal('H', 2.00, sigma=0.03)
h = pm.Normal('h', 0.88, sigma=0.04)
Q = pm.Deterministic('Q', H-h)
trace = pm.sample(10000)
with model:
print(pm.summary(trace).round(3))
with model:
pm.traceplot(trace, combined=False)
print("MCMC gives {:.2f} +/- {:.2f}, analytic gives {} +/- {}".format(trace['Q'].mean(),
trace['Q'].std(), 1.12, 0.05))
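# Analytic cross-check (added note): for Q = H - h with independent Gaussian uncertainties,
# sigma_Q = sqrt(0.03**2 + 0.04**2) = 0.05, which is where the quoted 1.12 +/- 0.05 comes from.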
with model:
pm.plot_posterior(trace)
with pm.Model() as model:
d = pm.Normal('d', 123, tau=(3)**-2)
t = pm.Normal('t', 20.0, tau=(1.2)**-2)
v = pm.Deterministic('v', d/t)
trace = pm.sample(40000, chains=4)
with model:
print(pm.summary(trace).round(3))
with model:
pm.traceplot(trace, combined=False, lines=[('d', {}, 123), ('t', {}, 20), ('v', {}, 6)])
print("MCMC gives {0:.2f}, analytic gives {1}".format(trace['v'].std(), 0.39))
with pm.Model() as model:
T = pm.Normal('T', 0.20, tau=(0.01)**-2)
pm.Deterministic('1/T', 1/T)
trace = pm.sample(10000, tune=1000)
pm.traceplot(trace, combined=False)
pm.summary(trace).round(3)
print("MCMC gives {0:.1f} +/- {1:.1f}, analytic gives {2} +/- {3}".format(np.mean(trace['1/T']),
np.std(trace['1/T']),
5.0, 0.3))
with pm.Model() as model:
g = 9.80
t = pm.Normal('t', 0.60, tau=(0.06)**-2)
v0 = pm.Normal('v0', 4.0, tau=(0.2)**-2)
h = pm.Deterministic('h', v0*t - 0.5*g*t**2)
trace = pm.sample(10000)
pm.traceplot(trace, combined=False)
pm.summary(trace).round(3)
print("MCMC gives {0:.1f} +/- {1:.1f}, analytic gives {2} +/- {3}".format(np.mean(trace['h']),
np.std(trace['h']),
0.6, 0.4))
with pm.Model() as model:
A = pm.Normal('A', 3.6, tau=(0.2)**-2)
B = pm.Normal('B', 3.3, tau=(0.3)**-2)
D = pm.Deterministic('D', A-B)
trace = pm.sample(1000, chains=6)
pm.summary(trace).round(3)
pm.traceplot(trace, combined=False);
print("MCMC gives {0:.1f} +/- {1:.1f}, analytic gives {2} +/- {3}".format(np.mean(trace['D']),
np.std(trace['D']),
0.3, 0.4))
data = [.22, .23, .21, .22]
with pm.Model() as model:
h = pm.Normal('h', 1.0, sigma=0.01)
t = pm.Normal('t', 2.2, sigma=1, observed=data)
g = pm.Deterministic('g', 2*h/t**2)
trace = pm.sample(10000)
with model:
    pm.plot_posterior(trace)
pm.plot_posterior?
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install TensorFlow for C
Step2: Linker
Step3: If you extract the TensorFlow C library to a non-system directory, such as
Step4: Compile
Step5: Success
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
%%bash
FILENAME=libtensorflow-cpu-linux-x86_64-2.8.0.tar.gz
wget -q --no-check-certificate https://storage.googleapis.com/tensorflow/libtensorflow/${FILENAME}
sudo tar -C /usr/local -xzf ${FILENAME}
%%bash
sudo ldconfig /usr/local/lib
%%writefile hello_tf.c
#include <stdio.h>
#include <tensorflow/c/c_api.h>
int main() {
printf("Hello from TensorFlow C library version %s\n", TF_Version());
return 0;
}
%%bash
gcc hello_tf.c -ltensorflow -o hello_tf
./hello_tf
%%bash
gcc -I/usr/local/include -L/usr/local/lib hello_tf.c -ltensorflow -o hello_tf
./hello_tf
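%%bash
# Added sketch: if the library lives in a non-system prefix instead of /usr/local,
# point the compile-time and run-time linkers at it (adjust the paths accordingly).
export LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
./hello_tf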
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: After each question, we check on a small example that it works as expected.
Step2: Q2
Step3: Q3
Step4: Sometimes this computation falls into an infinite loop
Step5: Remark
Step6: Even in this case, we do not land exactly on 20 years. We would need a variable value for step to handle every case. For example, as soon as the number of months increases by more than 1, we divide $K$ by 2. The precision would then be down to the month.
Step7: Q5
Step8: Q6
Step9: Q7 exercise 1
Step10: Q7 exercise 2
Step11: Q8
Step12: The dk parameter gives an indication of the precision. We compare the execution times
Step13: Exercise 2
Step14: Q2
Step15: Exercise 3
Step16: Q2
Step17: Remark
Step18: As the example shows, this happens when trying to make a numerical comparison between a string and a numeric type. The string must be converted.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from jyquickhelper import add_notebook_menu
add_notebook_menu()
def rendement(x, n, r):
return x*(1+r)**n
rendement(1, 2, 0.02)
rendement(1, 3, 0.02)
def decompose_mensualite(K,M,p):
i = K * ((1+p)**(1.0/12)-1)
return M-i, i
decompose_mensualite(180000, 1000, 0.029)
def mensualites(K,M,p):
res = []
while K > 0:
cap, i = decompose_mensualite(K,M,p)
if cap < 0:
raise Exception("problème avec K={0} cap={1} i={2} len={3}".format(K,cap,i,len(res)))
K -= cap
if K < 0:
res.append(M + K)
else:
res.append(M)
return res
len(mensualites(180000,1000,0.029))/12
mens = mensualites(180000,1000,0.029)
mens[:12]
mens[-6:]
def somme_maximale(M,p,Y):
K = 20000
l = mensualites(K, M, p)
while len(l) < Y*12:
K += 1000
l = mensualites(K, M, p)
return K
somme_maximale(1000, 0.029, 20)
def somme_maximale_step(M,p,Y,step=10000):
K = 20000
l = mensualites(K, M, p)
while len(l) < Y*12:
K += step
l = mensualites(K, M, p)
if len(l) >= (Y-3)*10:
print("K", K,"mois", len(l), "années", len(l)//12)
return K
somme_maximale_step(1000, 0.029, 20)
somme_maximale_step(1000, 0.029, 20, step=1000)
def somme_maximale_mois_step(M,p,Y,step=10000):
K = 20000
l = mensualites(K, M, p)
l0 = l
while len(l) < Y*12:
while True:
l = mensualites(K + step, M, p)
if len(l) > len(l0) + 1:
step /= 2
else:
K += step
l0 = l
break
if len(l) >= (Y-1)*12:
print("K", K,"mois", len(l), "années", len(l)//12, "step", step)
return K
somme_maximale_mois_step(1000, 0.029, 20)
def economie(A,S,L,r,Y):
delta = A - S*L
rm = ((1+r)**(1.0/12)-1)
eco = 0
nbm = Y*12
while nbm > 0:
eco = eco * (1+rm) + delta
nbm -= 1
return eco
economie(1000,40,20,0.015,10)
from pyquickhelper.helpgen import NbImage
NbImage("exam2016_values.png")
def bascule(A,S,L,r,Y,C,p):
Y = 0
possible = C*S
while possible > 0:
Y += 1
eco = economie(A,S,L,r,Y)
somme = somme_maximale(A,p,Y)
possible = C*S - somme - eco
return Y
bascule(1000,40,20,0.015,20,8000,0.029)
def surface_max(A,L,r,Y,C,p,delay=20):
S = 1
wait = bascule(A,S,L,r,Y,C,p)
while wait < delay:
S += 1
wait = bascule(A,S,L,r,Y,C,p)
return S
surface_max(1000,20,0.015,20,8000,0.029)
def A40a30(L,r,Y,C,p):
A = 10000
S = 40
wait = bascule(A,S,L,r,Y,C,p)
while wait < 7:
A -= 100
wait = bascule(A,S,L,r,Y,C,p)
return A
A40a30(20,0.015,20,8000,0.029)
def somme_maximale_dicho(M,p,Y):
K_max = M / ((1+p)**(1.0/12)-1)
K = K_max / 2
step = 0.5
dk = K * step
l = mensualites(K, M, p)
while len(l) != Y*12 and dk > 1e-5:
if len(l) < Y*12:
K += dk
K = min(K_max - 1000, K)
else:
K -= dk
        dk *= step  # halve the step at each iteration (dichotomy)
l = mensualites(K, M, p)
if len(l) != Y*12:
raise Exception("il faut augmenter step")
return K
somme_maximale_dicho(1000, 0.029, 20)
%timeit somme_maximale(1000, 0.029, 20)
%timeit somme_maximale_dicho(1000, 0.029, 20)
def transforme_numero(prenom, numero):
res = numero[:2]
for i, c in enumerate(prenom):
if c in "aeiouy":
res += numero[i*2+3] + numero[i*2+2]
else:
res += numero[i*2+2:i*2+4]
if len(res) >= len(numero):
break
return res
transforme_numero("adele", "0664342267")
transforme_numero("adele", "0646342267")
def transforme_numero(prenom, numero):
res = numero[:2]
for i, c in enumerate(prenom):
if c in "aeiouy":
res += numero[i*2+2] + str ( (int(numero[i*2+2]) + int(numero[i*2+3])) % 10)
else:
res += numero[i*2+2:i*2+4]
if len(res) >= len(numero):
break
return res
transforme_numero("adele", "0664342267")
def transforme_numero_envers(prenom, numero):
res = numero[:2]
for i, c in enumerate(prenom):
if c in "aeiouy":
res += numero[i*2+2] + str ( (int(numero[i*2+3]) - int(numero[i*2+2]) + 10) % 10)
else:
res += numero[i*2+2:i*2+4]
if len(res) >= len(numero):
break
return res
transforme_numero_envers("adele", "0660342467")
# raises an exception
"3" < 4
int("3") < 4
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The former was both a Closure and a Higher-Order Function. Have you heard about Functional Programming?
Step2: The previous was the built-in "Decorator" syntax.
|
<ASSISTANT_TASK:>
Python Code:
"Elapsed decorator."
import datetime
def elapsed(func):
"Elapsed decorator"
def _wrapper(*args, **kwargs):
"Decoration function"
start = datetime.datetime.now()
ret = func(*args, **kwargs)
print("Elapsed time", datetime.datetime.now() - start)
return ret
return _wrapper
# Usage:
import time
@elapsed
def task():
"A difficult task."
print("Processing...")
time.sleep(2)
task()
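# Added sketch (not from the original): functools.wraps keeps the decorated function's
# name and docstring, which the plain wrapper above loses.
import functools
def elapsed_wraps(func):
    "Elapsed decorator that preserves metadata."
    @functools.wraps(func)
    def _wrapper(*args, **kwargs):
        start = datetime.datetime.now()
        ret = func(*args, **kwargs)
        print("Elapsed time", datetime.datetime.now() - start)
        return ret
    return _wrapper
@elapsed_wraps
def task2():
    "Another difficult task."
    time.sleep(1)
task2()
print(task2.__name__, '-', task2.__doc__)  # metadata survives thanks to wraps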
class Duck:
def quack(self):
print("Just a crazy, darn fool duck!")
class Man:
def quack(self):
print("Are you crazy?!")
def porky_pig_shoots_a(quacker):
quacker.quack()
duffy = Duck()
cesare = Man()
porky_pig_shoots_a(duffy)
porky_pig_shoots_a(cesare)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ec-earth-consortium', 'ec-earth3-lr', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Below I'm plotting an example image from the MNIST dataset. These are 28x28 grayscale images of handwritten digits.
Step2: We'll train an autoencoder with these images by flattening them into 784 length vectors. The images from this dataset are already normalized such that the values are between 0 and 1. Let's start by building basically the simplest autoencoder with a single ReLU hidden layer. This layer will be used as the compressed representation. Then, the encoder is the input layer and the hidden layer. The decoder is the hidden layer and the output layer. Since the images are normalized between 0 and 1, we need to use a sigmoid activation on the output layer to get values matching the input.
Step3: Training
Step4: Here I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss.
Step5: Checking out the results
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
# Size of the encoding layer (the hidden layer)
encoding_dim = 32 # feel free to change this value
image_size = mnist.train.images.shape[1]
# Input and target placeholders
inputs_ = tf.placeholder(tf.float32, (None, image_size), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, image_size), name='targets')
# Output of hidden layer, single fully connected layer here with ReLU activation
encoded = tf.layers.dense(inputs_, encoding_dim, activation=tf.nn.relu)
# Output layer logits, fully connected layer with no activation
logits = tf.layers.dense(encoded, image_size, activation=None)
# Sigmoid output from logits
decoded = tf.nn.sigmoid(logits, name='output')
# Sigmoid cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
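# Note that the loss takes the raw logits rather than `decoded`:
# sigmoid_cross_entropy_with_logits applies the sigmoid itself in a numerically
# stable way, so `decoded` is only needed for looking at reconstructions.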
# Mean of the loss
cost = tf.reduce_mean(loss)
# Adam optimizer
opt = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
# Create the session
sess = tf.Session()
epochs = 20
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
feed = {inputs_: batch[0], targets_: batch[0]}
batch_cost, _ = sess.run([cost, opt], feed_dict=feed)
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed, compressed = sess.run([decoded, encoded], feed_dict={inputs_: in_imgs})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: MBB Beam (Figure 2 from paper)
Step2: If desired, designs can also be converted into PIL.Image objects with the pipeline_utils.image_from_design utility. This is convenient for exporting single frames or creating GIFs
Step3: This produces the following movie of the optimization process
Step4: Now, the CNN-LBFGS model performs best
Step5: Multistory building
Step6: For fun, we'll examine designs at different time-steps.
Step7: Thin support bridge
|
<ASSISTANT_TASK:>
Python Code:
# Install dependencies with pip. Only run this once.
! pip install -q tf-nightly git+https://github.com/google-research/neural-structural-optimization.git
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from IPython import display
from PIL import Image
import seaborn
import matplotlib.pyplot as plt
import xarray
import pandas as pd
from neural_structural_optimization import pipeline_utils
from neural_structural_optimization import problems
from neural_structural_optimization import models
from neural_structural_optimization import topo_api
from neural_structural_optimization import train
def train_all(problem, max_iterations, cnn_kwargs=None):
args = topo_api.specified_task(problem)
if cnn_kwargs is None:
cnn_kwargs = {}
model = models.PixelModel(args=args)
ds_mma = train.method_of_moving_asymptotes(model, max_iterations)
model = models.PixelModel(args=args)
ds_oc = train.optimality_criteria(model, max_iterations)
model = models.PixelModel(args=args)
ds_pix = train.train_lbfgs(model, max_iterations)
model = models.CNNModel(args=args, **cnn_kwargs)
ds_cnn = train.train_lbfgs(model, max_iterations)
dims = pd.Index(['cnn-lbfgs', 'mma', 'oc', 'pixel-lbfgs'], name='model')
return xarray.concat([ds_cnn, ds_mma, ds_oc, ds_pix], dim=dims)
problem = problems.mbb_beam(height=20, width=60)
max_iterations = 200
# can't upscale by exactly 8x for a 60x20 design region, so upscale by
# only 4x instead
%time ds = train_all(problem, max_iterations, cnn_kwargs=dict(resizes=(1, 1, 2, 2, 1)))
ds
ds.loss.transpose().to_pandas().cummin().loc[:200].plot(linewidth=2)
plt.ylim(215, 260)
plt.ylabel('Compliance (loss)')
plt.xlabel('Optimization step')
seaborn.despine()
ds.design.sel(step=200).plot.imshow(
col='model', x='x', y='y', size=2, aspect=2.5, col_wrap=2,
yincrease=False, add_colorbar=False, cmap='Greys')
def save_gif_movie(images, path, duration=200, loop=0, **kwargs):
images[0].save(path, save_all=True, append_images=images[1:],
duration=duration, loop=loop, **kwargs)
images = [
pipeline_utils.image_from_design(design, problem)
for design in ds.design.sel(model='cnn-lbfgs')[:100]
]
save_gif_movie([im.resize((5*120, 5*20)) for im in images], 'movie.gif')
problem = problems.PROBLEMS_BY_NAME['mbb_beam_192x64_0.4']
max_iterations = 100
%time ds = train_all(problem, max_iterations)
ds.loss.transpose().to_pandas().cummin().loc[:200].plot(linewidth=2)
plt.ylim(230, 330)
plt.ylabel('Compliance (loss)')
plt.xlabel('Optimization step')
seaborn.despine()
# the pixel-lbfgs does not run for the full 100 steps (it terminates
# early due to reaching a local minima), so use fill() to forward fill
# to the last valid design.
ds.design.ffill('step').sel(step=100).plot.imshow(
col='model', x='x', y='y', size=2, aspect=2.5, col_wrap=2,
yincrease=False, add_colorbar=False, cmap='Greys')
plt.suptitle(problem.name, y=1.02);
problem = problems.PROBLEMS_BY_NAME['multistory_building_64x128_0.4']
max_iterations = 100 # keep things fast
%time ds = train_all(problem, max_iterations)
ds.loss.transpose().to_pandas().cummin().loc[:100].plot(linewidth=2)
plt.ylim(40, 100)
plt.ylabel('Compliance (loss)')
plt.xlabel('Optimization step')
seaborn.despine()
plt.suptitle(problem.name);
ds.design.sel(step=[0, 1, 2, 5, 10, 20, 50, 100]).plot.imshow(
row='model', col='step', x='x', y='y', size=2, aspect=0.5,
yincrease=False, add_colorbar=False, cmap='Greys')
plt.subplots_adjust(wspace=0.1, hspace=0.05)
plt.suptitle(problem.name, y=1.02);
# we really need more iterations to see the CNN-LBFGS method dominate
problem = problems.PROBLEMS_BY_NAME['thin_support_bridge_128x128_0.2']
max_iterations = 200
%time ds = train_all(problem, max_iterations)
ds.loss.transpose().to_pandas().cummin().plot(linewidth=2)
plt.ylim(70, 120)
plt.ylabel('Compliance (loss)')
plt.xlabel('Optimization step')
seaborn.despine()
plt.suptitle(problem.name);
(xarray.concat([ds.design, ds.design.sel(x=slice(None, None, -1))] * 2, dim='x')
.sel(step=200)
.plot.imshow(
col='model', x='x', y='y', size=2.5, aspect=2, col_wrap=2,
yincrease=False, add_colorbar=False, cmap='Greys'))
plt.suptitle(problem.name, y=1.02);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set-up
Step2: BatchRequests
Step3: Example 1
Step4: To continue our example, we will continue building a RuleBasedProfiler using our ColumnDomainBuilder
Step5: Create RuleBasedProfiler and add Rule
Step6: As expected, our simple RuleBasedProfiler outputs 4 Expectations, one for each of our 4 columns.
Step7: Build a ParameterBuilder
Step8: Build an ExpectationConfigurationBuilder
Step9: Build a Rule, RuleBasedProfiler, and run
Step10: Add the Rule to our RuleBasedProfiler and run.
Step11: The resulting ExpectationSuite now contains values (-80.0, 0.0, etc.) that were calculated from the Batch of data defined by the BatchRequest.
Step12: Instantiating RuleBasedProfiler with variables
Step13: Instantiating ColumnDomainBuilder
Step14: Instantiating ParameterBuilders
Step15: Instantiating ExpectationConfigurationBuilders
Step16: Instantiating RuleBasedProfiler and Running
Step17: As expected, the resulting ExpectationSuite contains our minimum and maximum values, with tip_amount ranging from $-2.16 to $195.05 (a generous tip), and fare_amount ranging from $-98.90 (a refund) to $405,904.54 (a very very long trip).
Step18: In the simplest use case, the ColumnDomainBuilder can output all of the columns in yellow_tripdata_sample_2018
Step19: Columns can also be included or excluded by name
Step20: As described above, the ColumnDomainBuilder also allows you to choose columns based on their semantic types (such as numeric, or text). This is passed in as part of the include_semantic_types parameter.
Step21: MultiColumnDomainBuilder
Step22: ColumnPairDomainBuilder
Step23: TableDomainBuilder
Step24: MapMetricColumnDomainBuilder
Step25: CategoricalColumnDomainBuilder
Step26: ParameterBuilders
Step27: MetricMultiBatchParameterBuilder
Step28: my_column_min[value] now contains a list of 12 values, which are the minimum values of the total_amount column for each of the 12 Batches associated with the 2018 taxi_data data. If we were to use these values in an ExpectationConfigurationBuilder, they would be accessible through the fully-qualified parameter
Step29: my_value_set[value] now contains a list of 3 values, which is a list of all unique vendor_ids across 12 Batches in the 2018 taxi_data dataset.
Step30: vendor_id is a single integer. Let's see if our default patterns can match it.
Step31: Looks like my_regex_set[value] is an empty list. This means that none of the evaluated_regexes matched our domain. Let's try the same thing again, but this time with regexes that will match our vendor_id column: ^\\d{1}$ and ^\\d{2}$, which match 1- and 2-digit integers anchored at the beginning and end of the string.
Step32: Now my_regex_set[value] contains ^\\d{1}$.
Step33: The result contains our matching datetime pattern, which is '%Y-%m-%d %H
Step34: As we see, the mean value range for the total_amount column is 16.0 to 44.0
|
<ASSISTANT_TASK:>
Python Code:
import great_expectations as ge
from ruamel import yaml
from great_expectations.core.batch import BatchRequest
from great_expectations.rule_based_profiler.rule.rule import Rule
from great_expectations.rule_based_profiler.rule_based_profiler import RuleBasedProfiler, RuleBasedProfilerResult
from great_expectations.rule_based_profiler.domain_builder import (
DomainBuilder,
ColumnDomainBuilder,
)
from great_expectations.rule_based_profiler.parameter_builder import (
MetricMultiBatchParameterBuilder,
)
from great_expectations.rule_based_profiler.expectation_configuration_builder import (
DefaultExpectationConfigurationBuilder,
)
data_context: ge.DataContext = ge.get_context()
data_path: str = "../../../../test_sets/taxi_yellow_tripdata_samples"
datasource_config = {
"name": "taxi_multi_batch_datasource",
"class_name": "Datasource",
"module_name": "great_expectations.datasource",
"execution_engine": {
"module_name": "great_expectations.execution_engine",
"class_name": "PandasExecutionEngine",
},
"data_connectors": {
"default_inferred_data_connector_name": {
"class_name": "InferredAssetFilesystemDataConnector",
"base_directory": data_path,
"default_regex": {
"group_names": ["data_asset_name", "month"],
"pattern": "(yellow_tripdata_sample_2018)-(\\d.*)\\.csv",
},
},
"default_inferred_data_connector_name_all_years": {
"class_name": "InferredAssetFilesystemDataConnector",
"base_directory": data_path,
"default_regex": {
"group_names": ["data_asset_name", "year", "month"],
"pattern": "(yellow_tripdata_sample)_(\\d.*)-(\\d.*)\\.csv",
},
},
},
}
data_context.test_yaml_config(yaml.dump(datasource_config))
# add_datasource only if it doesn't already exist in our configuration
try:
data_context.get_datasource(datasource_config["name"])
except ValueError:
data_context.add_datasource(**datasource_config)
single_batch_batch_request: BatchRequest = BatchRequest(
datasource_name="taxi_multi_batch_datasource",
data_connector_name="default_inferred_data_connector_name",
data_asset_name="yellow_tripdata_sample_2018",
data_connector_query={"index": -1},
)
multi_batch_batch_request: BatchRequest = BatchRequest(
datasource_name="taxi_multi_batch_datasource",
data_connector_name="default_inferred_data_connector_name",
data_asset_name="yellow_tripdata_sample_2018",
)
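# The first request uses data_connector_query={"index": -1} to select just the
# last of the sorted 2018 monthly batches, whereas this request (no query)
# returns all 12 monthly Batches for the same data asset.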
domain_builder: DomainBuilder = ColumnDomainBuilder(
include_column_name_suffixes=["_amount"],
data_context=data_context,
)
domains: list = domain_builder.get_domains(rule_name="my_rule", batch_request=single_batch_batch_request)
# assert that the domains we get are the ones we expect
assert len(domains) == 4
assert domains == [
{"rule_name": "my_rule", "domain_type": "column", "domain_kwargs": {"column": "fare_amount"}, "details": {"inferred_semantic_domain_type": {"fare_amount": "numeric",}},},
{"rule_name": "my_rule", "domain_type": "column", "domain_kwargs": {"column": "tip_amount"}, "details": {"inferred_semantic_domain_type": {"tip_amount": "numeric",}},},
{"rule_name": "my_rule", "domain_type": "column", "domain_kwargs": {"column": "tolls_amount"}, "details": {"inferred_semantic_domain_type": {"tolls_amount": "numeric",}},},
{"rule_name": "my_rule", "domain_type": "column", "domain_kwargs": {"column": "total_amount"}, "details": {"inferred_semantic_domain_type": {"total_amount": "numeric",}},},
]
default_expectation_configuration_builder = DefaultExpectationConfigurationBuilder(
expectation_type="expect_column_values_to_not_be_null",
column="$domain.domain_kwargs.column", # Get the column from domain_kwargs that are retrieved from the DomainBuilder
)
simple_rule: Rule = Rule(
name="rule_with_no_parameters",
variables=None,
domain_builder=domain_builder,
expectation_configuration_builders=[default_expectation_configuration_builder],
)
from great_expectations.core import ExpectationSuite
from great_expectations.rule_based_profiler.rule_based_profiler import RuleBasedProfiler
my_rbp: RuleBasedProfiler = RuleBasedProfiler(
name="my_simple_rbp", data_context=data_context, config_version=1.0
)
my_rbp.add_rule(rule=simple_rule)
profiler_result: RuleBasedProfilerResult
profiler_result = my_rbp.run(batch_request=single_batch_batch_request)
assert len(profiler_result.expectation_configurations) == 4
profiler_result.expectation_configurations
domain_builder: DomainBuilder = ColumnDomainBuilder(
include_column_name_suffixes=["_amount"],
data_context=data_context,
)
domains: list = domain_builder.get_domains(rule_name="my_rule", batch_request=single_batch_batch_request)
domains
numeric_range_parameter_builder: MetricMultiBatchParameterBuilder = (
MetricMultiBatchParameterBuilder(
data_context=data_context,
metric_name="column.min",
metric_domain_kwargs="$domain.domain_kwargs", # domain kwarg values are accessible using fully qualified parameters
name="my_column_min",
)
)
config_builder: DefaultExpectationConfigurationBuilder = (
DefaultExpectationConfigurationBuilder(
expectation_type="expect_column_values_to_be_greater_than",
value="$parameter.my_column_min.value[-1]", # the parameter is accessible using a fully qualified parameter
column="$domain.domain_kwargs.column", # domain kwarg values are accessible using fully qualified parameters
name="my_column_min",
)
)
simple_rule: Rule = Rule(
name="rule_with_parameters",
variables=None,
domain_builder=domain_builder,
parameter_builders=[numeric_range_parameter_builder],
expectation_configuration_builders=[config_builder],
)
my_rbp = RuleBasedProfiler(name="my_rbp", data_context=data_context, config_version=1.0)
my_rbp.add_rule(rule=simple_rule)
profiler_result = my_rbp.run(batch_request=single_batch_batch_request)
assert len(profiler_result.expectation_configurations) == 4
profiler_result.expectation_configurations
variables: dict = {
"multi_batch_batch_request": multi_batch_batch_request,
"estimator_name": "bootstrap",
"false_positive_rate": 5.0e-2,
}
my_rbp = RuleBasedProfiler(name="my_complex_rbp", data_context=data_context, variables=variables, config_version=1.0)
from great_expectations.rule_based_profiler.domain_builder import ColumnDomainBuilder
domain_builder: DomainBuilder = ColumnDomainBuilder(
include_column_names=["tip_amount", "fare_amount"],
data_context=data_context,
)
from great_expectations.rule_based_profiler.parameter_builder import NumericMetricRangeMultiBatchParameterBuilder
min_range_parameter_builder: NumericMetricRangeMultiBatchParameterBuilder = NumericMetricRangeMultiBatchParameterBuilder(
name="min_range_parameter_builder",
metric_name="column.min",
metric_domain_kwargs="$domain.domain_kwargs",
false_positive_rate='$variables.false_positive_rate',
estimator="$variables.estimator_name",
data_context=data_context,
)
max_range_parameter_builder: NumericMetricRangeMultiBatchParameterBuilder = NumericMetricRangeMultiBatchParameterBuilder(
name="max_range_parameter_builder",
metric_name="column.max",
metric_domain_kwargs="$domain.domain_kwargs",
false_positive_rate="$variables.false_positive_rate",
estimator="$variables.estimator_name",
data_context=data_context,
)
expect_column_min: DefaultExpectationConfigurationBuilder = DefaultExpectationConfigurationBuilder(
expectation_type="expect_column_min_to_be_between",
column="$domain.domain_kwargs.column",
min_value="$parameter.min_range_parameter_builder.value[0]",
max_value="$parameter.min_range_parameter_builder.value[1]",
)
expect_column_max: DefaultExpectationConfigurationBuilder = DefaultExpectationConfigurationBuilder(
expectation_type="expect_column_max_to_be_between",
column="$domain.domain_kwargs.column",
min_value="$parameter.max_range_parameter_builder.value[0]",
max_value="$parameter.max_range_parameter_builder.value[1]",
)
more_complex_rule: Rule = Rule(
name="rule_with_parameters",
variables=None,
domain_builder=domain_builder,
parameter_builders=[min_range_parameter_builder, max_range_parameter_builder],
expectation_configuration_builders=[expect_column_min, expect_column_max],
)
my_rbp.add_rule(rule=more_complex_rule)
profiler_result = my_rbp.run(batch_request=multi_batch_batch_request)
profiler_result.expectation_configurations
from great_expectations.rule_based_profiler.domain_builder import ColumnDomainBuilder
domain_builder: DomainBuilder = ColumnDomainBuilder(
data_context=data_context,
)
domains: list = domain_builder.get_domains(rule_name="my_rule", batch_request=single_batch_batch_request)
assert len(domains) == 18 # all columns in yellow_tripdata_sample_2018
domain_builder: DomainBuilder = ColumnDomainBuilder(
include_column_names=["vendor_id"],
data_context=data_context,
)
domains: list = domain_builder.get_domains(rule_name="my_rule", batch_request=single_batch_batch_request)
domains
domain_builder: DomainBuilder = ColumnDomainBuilder(
exclude_column_names=["vendor_id"],
data_context=data_context,
)
domains: list = domain_builder.get_domains(rule_name="my_rule", batch_request=single_batch_batch_request)
assert len(domains) == 17 # all columns in yellow_tripdata_sample_2018 with vendor_id excluded
domains
domain_builder: DomainBuilder = ColumnDomainBuilder(
include_semantic_types=['numeric'],
data_context=data_context,
)
domains: list = domain_builder.get_domains(rule_name="my_rule", batch_request=single_batch_batch_request)
assert len(domains) == 15 # columns in yellow_trip_data_sample_2018 that are numeric
from great_expectations.rule_based_profiler.domain_builder import MultiColumnDomainBuilder
domain_builder: DomainBuilder = MultiColumnDomainBuilder(
include_column_names=["vendor_id", "fare_amount", "tip_amount"],
data_context=data_context,
)
domains: list = domain_builder.get_domains(rule_name="my_rule", batch_request=single_batch_batch_request)
assert len(domains) == 1 # 3 columns are part of a single multi-column domain.
expected_columns: list = ["vendor_id", "fare_amount", "tip_amount"]
assert domains[0]["domain_kwargs"]["column_list"] == expected_columns
from great_expectations.rule_based_profiler.domain_builder import ColumnPairDomainBuilder
domain_builder: DomainBuilder = ColumnPairDomainBuilder(
include_column_names=["vendor_id", "fare_amount"],
data_context=data_context,
)
domains: list = domain_builder.get_domains(rule_name="my_rule", batch_request=single_batch_batch_request)
assert len(domains) == 1 # 2 columns are part of a single multi-column domain.
expect_columns_dict: dict = {'column_A': 'fare_amount', 'column_B': 'vendor_id'}
assert domains[0]["domain_kwargs"] == expect_columns_dict
from great_expectations.rule_based_profiler.domain_builder import TableDomainBuilder
domain_builder: DomainBuilder = TableDomainBuilder(
data_context=data_context,
)
domains: list = domain_builder.get_domains(rule_name="my_rule", batch_request=single_batch_batch_request)
domains
from great_expectations.rule_based_profiler.domain_builder import MapMetricColumnDomainBuilder
domain_builder: DomainBuilder = MapMetricColumnDomainBuilder(
map_metric_name="column_values.nonnull",
data_context=data_context,
)
domains: list = domain_builder.get_domains(rule_name="my_rule", batch_request=single_batch_batch_request)
len(domains) == 17 # filtered 1 column that was all None
from great_expectations.rule_based_profiler.domain_builder import CategoricalColumnDomainBuilder
domain_builder: DomainBuilder = CategoricalColumnDomainBuilder(
cardinality_limit_mode="very_few", # VERY_FEW = 10 or less
data_context=data_context,
)
domains: list = domain_builder.get_domains(rule_name="my_rule", batch_request=single_batch_batch_request)
assert len(domains) == 7
from great_expectations.rule_based_profiler.types.domain import Domain
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.rule_based_profiler.types import ParameterContainer
domain: Domain = Domain(rule_name="my_rule", domain_type=MetricDomainTypes.COLUMN, domain_kwargs = {'column': 'total_amount'})
from great_expectations.rule_based_profiler.parameter_builder import MetricMultiBatchParameterBuilder
numeric_range_parameter_builder: MetricMultiBatchParameterBuilder = (
MetricMultiBatchParameterBuilder(
data_context=data_context,
metric_name="column.min",
metric_domain_kwargs=domain.domain_kwargs,
name="my_column_min",
)
)
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
parameters = {
domain.id: parameter_container,
}
numeric_range_parameter_builder.build_parameters(
domain=domain,
parameters=parameters,
batch_request=multi_batch_batch_request,
)
# we check the parameter container
print(parameter_container.parameter_nodes)
min(parameter_container.parameter_nodes["parameter"]["parameter"]["my_column_min"]["value"])
from great_expectations.rule_based_profiler.parameter_builder import ValueSetMultiBatchParameterBuilder
domain: Domain = Domain(rule_name="my_rule", domain_type=MetricDomainTypes.COLUMN, domain_kwargs = {'column': 'vendor_id'})
# instantiating a new parameter container, since it can contain the results of more than one ParmeterBuilder.
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
parameters[domain.id] = parameter_container
value_set_parameter_builder: ValueSetMultiBatchParameterBuilder = (
ValueSetMultiBatchParameterBuilder(
data_context=data_context,
metric_domain_kwargs=domain.domain_kwargs,
name="my_value_set",
)
)
value_set_parameter_builder.build_parameters(
domain=domain,
parameters=parameters,
batch_request=multi_batch_batch_request,
)
print(parameter_container.parameter_nodes)
from great_expectations.rule_based_profiler.parameter_builder import RegexPatternStringParameterBuilder
domain: Domain = Domain(rule_name="my_rule", domain_type=MetricDomainTypes.COLUMN, domain_kwargs = {'column': 'vendor_id'})
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
parameters[domain.id] = parameter_container
regex_parameter_builder: RegexPatternStringParameterBuilder = (
RegexPatternStringParameterBuilder(
data_context=data_context,
metric_domain_kwargs=domain.domain_kwargs,
name="my_regex_set",
)
)
regex_parameter_builder.build_parameters(
domain=domain,
parameters=parameters,
batch_request=single_batch_batch_request,
)
print(parameter_container.parameter_nodes)
regex_parameter_builder: RegexPatternStringParameterBuilder = (
RegexPatternStringParameterBuilder(
data_context=data_context,
metric_domain_kwargs=domain.domain_kwargs,
candidate_regexes=["^\\d{1}$"],
name="my_regex_set",
)
)
regex_parameter_builder.build_parameters(
domain=domain,
parameters=parameters,
batch_request=single_batch_batch_request,
)
print(parameter_container.parameter_nodes)
from great_expectations.rule_based_profiler.parameter_builder import SimpleDateFormatStringParameterBuilder
domain: Domain = Domain(rule_name="my_rule", domain_type=MetricDomainTypes.COLUMN, domain_kwargs = {'column': 'pickup_datetime'})
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
parameters[domain.id] = parameter_container
simple_date_format_string_parameter_builder: SimpleDateFormatStringParameterBuilder = (
SimpleDateFormatStringParameterBuilder(
data_context=data_context,
metric_domain_kwargs=domain.domain_kwargs,
name="my_value_set",
)
)
simple_date_format_string_parameter_builder.build_parameters(
domain=domain,
parameters=parameters,
batch_request=single_batch_batch_request,
)
print(parameter_container.parameter_nodes)
parameter_container.parameter_nodes["parameter"]["parameter"]["my_value_set"]["value"]
from great_expectations.rule_based_profiler.parameter_builder import NumericMetricRangeMultiBatchParameterBuilder
domain: Domain = Domain(rule_name="my_rule", domain_type=MetricDomainTypes.COLUMN, domain_kwargs = {'column': 'total_amount'})
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
parameters[domain.id] = parameter_container
numeric_metric_range_parameter_builder: NumericMetricRangeMultiBatchParameterBuilder = NumericMetricRangeMultiBatchParameterBuilder(
name="column_mean_range",
metric_name="column.mean",
estimator="bootstrap",
metric_domain_kwargs=domain.domain_kwargs,
false_positive_rate=1.0e-2,
round_decimals=0,
data_context=data_context,
)
numeric_metric_range_parameter_builder.build_parameters(
domain=domain,
parameters=parameters,
batch_request=multi_batch_batch_request,
)
print(parameter_container.parameter_nodes)
#import shutil
# clean up Expectations directory after running tests
#shutil.rmtree("great_expectations/expectations/tmp")
#os.remove("great_expectations/expectations/.ge_store_backend_id")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Numpy operations
Step2: Arithmetic operations
Step3: The pandas arithmetic functions also have an option to fill missing values, replacing the entries that are missing in either of the dataframes with some given value.
Step4: Appending, Concatenating, and Merging
Step5: Sometimes indexes do not matter; they can be reset using <tt>ignore_index=True</tt>.
Step6: Concatenating is for adding lines and/or columns of multiple datasets (it is a generalization of appending)
Step7: The most important settings of the <tt>concat</tt> function are <tt>pd.concat(objs, axis=0, join='outer',ignore_index=False)</tt>, where <tt>objs</tt> is the sequence of dataframes to combine, <tt>axis</tt> chooses whether to stack rows (0) or columns (1), <tt>join</tt> keeps the union ('outer') or the intersection ('inner') of the labels on the other axis, and <tt>ignore_index</tt> renumbers the concatenation axis.
Step8: Merging is for putting together two dataframes with hopefully common data
Step9: Preparing the Data
Step10: Missing values
Step11: Carefully pruning data
Step12: To fill missing data (from a certain column), the recommended way is to use fillna() through dataframe["col"].fillna(value,inplace=True), which replaces all missing values in the column col of dataframe by value in place, that is, without making a copy (again, this is irreversible; to work on a copy instead, use inplace=False).
Step13: This indeed gives the correct result; however, the row index now has gaps. The index can be reset with reset_index(inplace=True,drop=True)
Step14: Improper values
Step15: Now we drop the corresponding rows with the drop method: drop(problematic_row.index, inplace=True).
Step16: And finally the ratings.
Step17: We finally have our dataset cured! Let us save it for further use.
Step18: Basic Statistics
Step19: The following table summarizes some other built-in Pandas aggregations
Step20: We see that these statistics do not make sense for all columns. Let us drop the timestamp column and examine the ratings.
Step21: GroupBy
Step22: So to get the mean of the ratings per user, the command is
Step23: Filtering
Step24: Transformations
Step25: Aggregations [*]
Step26: Exercises
Step27: Exercise
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
df = pd.DataFrame(np.random.randint(0, 10, (3, 4)), columns=['A', 'B', 'C', 'D'])
df
np.cos(df * np.pi/2 ) - 1
A = pd.DataFrame(np.random.randint(0, 20, (2, 2)), columns=list('AB'))
A
B = pd.DataFrame(np.random.randint(0, 10, (3, 3)), columns=list('BAC'))
B
A+B
A.add(B, fill_value=0.0)
A = pd.DataFrame(np.random.randint(0, 20, (2, 2)), columns=list('AB'))
A2 = pd.DataFrame(np.random.randint(0, 20, (3, 2)), columns=list('AB'))
print("A:\n",A,"\nA2:\n",A2)
A.append(A2) # this does not "append to A" but creates a new dataframe
A.append(A2,ignore_index=True)
A = pd.DataFrame(np.random.randint(0, 20, (2, 2)), columns=list('AB'))
A2 = pd.DataFrame(np.random.randint(0, 20, (3, 2)), columns=list('AB'))
A3 = pd.DataFrame(np.random.randint(0, 20, (1, 3)), columns=list('CAD'))
print("A:\n",A,"\nA2:\n",A2,"\nA3:\n",A3)
pd.concat([A,A2,A3],ignore_index=True)
pd.concat([A,A2,A3],axis=1)
pd.concat([A,A2,A3],axis=1,ignore_index=True,join='inner')
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
df1
df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'],
'hire_date': [2004, 2008, 2012, 2014]})
df2
df3 = pd.merge(df1,df2)
df3
df4 = pd.DataFrame({'group': ['Accounting', 'Engineering', 'HR'],
'supervisor': ['Carly', 'Guido', 'Steve']})
df4
pd.merge(df3,df4)
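# By default pd.merge joins on the column(s) the two frames share ('group' here)
# and performs an inner join, so only matching keys appear in the result.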
ratings = pd.read_csv('data/ml-small/ratings_mess.csv')
ratings.head(7) # enables to display the top n lines of a dataframe, 5 by default
ratings.isnull().head(5)
ratings.dropna(subset=["userId","movieId","rating"],inplace=True)
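# subset= restricts the NaN check to these three columns, so a row is removed only
# when userId, movieId or rating is missing; inplace=True mutates `ratings` directly.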
ratings.head(5)
ratings["timestamp"].fillna(0,inplace=True)
ratings.head(7)
ratings.reset_index(inplace=True,drop=True)
ratings.head(7)
ratings[ratings["userId"]<1] # Identifying a problem
ratings.drop(ratings[ratings["userId"]<1].index, inplace=True)
ratings.head(7)
pb_rows = ratings[ratings["movieId"]<1]
pb_rows
ratings.drop(pb_rows.index, inplace=True)
pb_rows = ratings[ratings["rating"]<0]
pb_rows2 = ratings[ratings["rating"]>5]
tot_pb_rows = pb_rows.append(pb_rows2 )
tot_pb_rows
ratings.drop(tot_pb_rows.index, inplace=True)
ratings.reset_index(inplace=True,drop=True)
ratings.to_csv("data/ml-small/ratings_cured.csv",index=False)
ratings = pd.read_csv('data/ml-small/ratings_cured.csv')
ratings.head()
ratings.describe()
ratings.drop("timestamp",axis=1,inplace=True)
ratings.head()
ratings["rating"].describe()
ratings.head()
ratings.groupby("userId")["rating"].mean()
ratings.groupby("userId")["rating"].count()
def filter_func(x):
return x["rating"].count() >= 2
filtered = ratings.groupby("userId").filter(filter_func)
filtered
filtered.groupby("userId")["rating"].count()
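# Unlike a boolean row mask, groupby().filter() keeps or drops whole groups:
# every user with fewer than 2 ratings disappears entirely from `filtered`.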
ratings.groupby("userId")["rating"].mean()
def center_ratings(x):
x["rating"] = x["rating"] - x["rating"].mean()
return x
centered = ratings.groupby("userId").apply(center_ratings)
centered.groupby("userId")["rating"].mean()
ratings.groupby("userId")["rating"].aggregate([min,max,np.mean,np.median,len])
import pandas as pd
import numpy as np
ratings_bots = pd.read_csv('data/ml-small/ratings_bots.csv')
import pandas as pd
import numpy as np
planets = pd.read_csv('data/planets.csv')
print(planets.shape)
planets.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: GRU RNNs
Step2: How does this work on anything that is not a real movie review?
|
<ASSISTANT_TASK:>
Python Code:
# Based on
# https://github.com/fchollet/deep-learning-with-python-notebooks/blob/master/6.2-understanding-recurrent-neural-networks.ipynb
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
%pylab inline
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from tensorflow import keras
# https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification
max_features = 10000 # number of words to consider as features
maxlen = 500 # cut texts after this number of words (among top max_features most common words)
# each review is encoded as a sequence of word indexes
# indexed by overall frequency in the dataset
# output is 0 (negative) or 1 (positive)
imdb = tf.keras.datasets.imdb.load_data(num_words=max_features)
(raw_input_train, y_train), (raw_input_test, y_test) = imdb
# https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences
input_train = tf.keras.preprocessing.sequence.pad_sequences(raw_input_train, maxlen=maxlen)
input_test = tf.keras.preprocessing.sequence.pad_sequences(raw_input_test, maxlen=maxlen)
# Batch Normalization:
# https://towardsdatascience.com/batch-normalization-in-neural-networks-1ac91516821c
# https://www.quora.com/Why-does-batch-normalization-help
from tensorflow.keras.layers import GRU, Embedding, Bidirectional, BatchNormalization, Dropout
embedding_dim = 32
dropout = 0.6
recurrent_dropout = 0.4
model = keras.Sequential()
# encoder
model.add(Embedding(input_dim=max_features, output_dim=embedding_dim, input_length=maxlen))
# https://arxiv.org/ftp/arxiv/papers/1701/1701.05923.pdf
# n = output dimension
# m = input dimension
# Total number of parameters for
# RNN = n**2 + nm + n
# GRU = 3 × (n**2 + nm + n)
# LSTM = 4 × (n**2 + nm + n)
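# e.g. for gru1 below, m = 32 (embedding output) and n = 32 units, so the GRU
# formula gives 3 * (32*32 + 32*32 + 32) = 6240 parameters; Keras versions that
# default to reset_after=True add a second recurrent bias, i.e. 3 * 32 more (6336).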
# return_sequences passes all outputs of all timesteps (not only the last one) to the next layer
model.add(GRU(name='gru1', units=32, dropout=dropout, recurrent_dropout=recurrent_dropout, return_sequences=True))
# for embedding: 32*2 (“standard deviation” parameter (gamma), “mean” parameter (beta)) trainable parameters
# and 32*2 (moving_mean and moving_variance) non-trainable parameters
model.add(BatchNormalization())
model.add(Dropout(dropout))
# stack recurrent layers like with fc
model.add(GRU(name='gru2', units=32))
model.add(BatchNormalization())
model.add(Dropout(dropout))
# latent space
model.add(tf.keras.layers.Dense(name='fc', units=32, activation='relu'))
# binary classifier as decoder
model.add(tf.keras.layers.Dense(name='classifier', units=1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
batch_size = 1000
%time history = model.fit(input_train, y_train, epochs=15, batch_size=batch_size, validation_split=0.2)
train_loss, train_accuracy = model.evaluate(input_train, y_train, batch_size=batch_size)
train_accuracy
test_loss, test_accuracy = model.evaluate(input_test, y_test, batch_size=batch_size)
test_accuracy
def plot_history(history, samples=10, init_phase_samples=None):
epochs = history.params['epochs']
acc = history.history['acc']
val_acc = history.history['val_acc']
every_sample = int(epochs / samples)
acc = pd.DataFrame(acc).iloc[::every_sample, :]
val_acc = pd.DataFrame(val_acc).iloc[::every_sample, :]
fig, ax = plt.subplots(figsize=(20,5))
ax.plot(acc, 'bo', label='Training acc')
ax.plot(val_acc, 'b', label='Validation acc')
ax.set_title('Training and validation accuracy')
ax.legend()
plot_history(history)
# precition
model.predict(input_test[0:5])
# ground truth
y_test[0:5]
word_to_id = keras.datasets.imdb.get_word_index()
def encode_text(text):
input_words = text.lower().split()
input_tokens = np.array([word_to_id[word] for word in input_words])
padded_input_tokens = keras.preprocessing.sequence.pad_sequences([input_tokens], maxlen=maxlen)
return padded_input_tokens
def predict_text(model, text):
input_sequence = encode_text(text)
embeddings = model.predict(input_sequence)
return embeddings
predict_text(model, "don't watch this movie")
predict_text(model, "lovely")
predict_text(model, "pathetic shit")
predict_text(model, "this is not a shit movie")
predict_text(model, "such a bad movie")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Median filtering
Step2: It looks like skimage's median filter is only good for uint8 data types. If it's a float it will be downgraded to uint8, and if it's uint16 it's really, really slow.
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
from scipy import ndimage
from skimage import filters, data
import skimage as ski
myData = uint16(ski.exposure.rescale_intensity(ski.color.rgb2gray(data.lena()),out_range='uint16'))
matshow(myData, cmap = 'gray')
#float64 images
myData = ski.color.rgb2gray(data.lena())
ski_mf_data = filters.median(myData,uint8(ones((10,10))))
nd_mf_data = ndimage.median_filter(myData,10)
fig, ax = subplots(1,2,squeeze=True)
ax[0].matshow(ski_mf_data,cmap='gray')
ax[1].matshow(nd_mf_data,cmap='gray')
fig.tight_layout()
print(ski_mf_data.dtype)
print(nd_mf_data.dtype)
#16 bit images
myData = uint16(ski.exposure.rescale_intensity(ski.color.rgb2gray(data.lena()),out_range='uint16'))
ski_mf_data = filters.median(myData,uint8(ones((10,10))))
nd_mf_data = ndimage.median_filter(myData,10)
fig, ax = subplots(1,2,squeeze=True)
ax[0].matshow(ski_mf_data,cmap='gray')
ax[1].matshow(nd_mf_data,cmap='gray')
fig.tight_layout()
print(ski_mf_data.dtype)
print(nd_mf_data.dtype)
#8 bit images
myData = uint8(ski.exposure.rescale_intensity(ski.color.rgb2gray(data.lena()),out_range='uint8'))
ski_mf_data = filters.median(myData,uint8(ones((10,10))))
nd_mf_data = ndimage.median_filter(myData,10)
fig, ax = subplots(1,2,squeeze=True)
ax[0].matshow(ski_mf_data,cmap='gray')
ax[1].matshow(nd_mf_data,cmap='gray')
fig.tight_layout()
print(ski_mf_data.dtype)
print(nd_mf_data.dtype)
#float64
myData = ski.color.rgb2gray(data.lena())
%timeit filters.median(myData,ones((10,10)))
%timeit filters.rank.median(myData,ones((10,10)))
%timeit nd_mf_data = ndimage.median_filter(myData,10)
myData.dtype
#uint16
myData = uint16(ski.exposure.rescale_intensity(ski.color.rgb2gray(data.lena()),out_range='uint16'))
%timeit filters.median(myData,ones((10,10)))
%timeit filters.median(myData,uint8(ones((10,10))))
%timeit nd_mf_data = ndimage.median_filter(myData,10)
myData.dtype
#uint8
myData = uint8(ski.exposure.rescale_intensity(ski.color.rgb2gray(data.lena()),out_range='uint8'))
%timeit filters.median(myData,ones((10,10)))
%timeit filters.median(myData,uint8(ones((10,10))))
%timeit nd_mf_data = ndimage.median_filter(myData,10)
myData.dtype
#take a look at the difference
matshow(ski_mf_data*1.0-1.0*nd_mf_data)
colorbar()
#The edges are different between the two methods. I don't really know why...
matshow((ski_mf_data*1.0-1.0*nd_mf_data)[0:15,0:15])
colorbar()
matshow((ski_mf_data*1.0-1.0*nd_mf_data)[-16:-1,0:15])
colorbar()
matshow((ski_mf_data*1.0-1.0*nd_mf_data)[0:15,-16:-1])
colorbar()
matshow((ski_mf_data*1.0-1.0*nd_mf_data)[-16:-1,-16:-1])
colorbar()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. listing the name of the first 10 products in the dataset.
Step2: 2. counting the number of positive and negative reviews.
Step3: Apply text cleaning on the review data
Step4: 4. data transformations
Step5: 5. compute a count for the number of times the word occurs in the review
Step6: ### 7. compute the number of product reviews that contain the word perfect.
Step7: 1. Quiz Question.
Step8: 9. extract two arrays feature_matrix and sentiment
Step9: 2. Quiz Question
Step10: 2. Answer
Step12: Compute derivative of log likelihood with respect to a single coefficient
Step13: 12. Write a function compute_log_likelihood
Step15: Taking gradient steps
Step16: 14. run the logistic regression solver
Step18: 5. Quiz question
Step19: 6. Quiz question
Step20: 7. Quiz question
Step21: Ten "most positive" words
Step22: 8. Quiz question
Step23: 9. Quiz question
|
<ASSISTANT_TASK:>
Python Code:
products = pd.read_csv('amazon_baby_subset.csv')
products['name'][:10]
print (products['sentiment'] == 1).sum()
print (products['sentiment'] == -1).sum()
print (products['sentiment']).count()
import json
with open('important_words.json') as important_words_file:
important_words = json.load(important_words_file)
print important_words[:3]
products = products.fillna({'review':''}) # fill in N/A's in the review column
def remove_punctuation(text):
import string
return text.translate(None, string.punctuation)
products['review_clean'] = products['review'].apply(remove_punctuation)
products.head(3)
for word in important_words:
products[word] = products['review_clean'].apply(lambda s : s.split().count(word))
products.head(1)
products['contains_perfect'] = products['perfect'] >=1
print products['contains_perfect'].sum()
def get_numpy_data(dataframe, features, label):
dataframe['constant'] = 1
features = ['constant'] + features
features_frame = dataframe[features]
feature_matrix = features_frame.as_matrix()
label_sarray = dataframe[label]
label_array = label_sarray.as_matrix()
return(feature_matrix, label_array)
feature_matrix, sentiment = get_numpy_data(products, important_words, 'sentiment')
print feature_matrix.shape
'''
feature_matrix: N * D
coefficients: D * 1
predictions: N * 1
produces probablistic estimate for P(y_i = +1 | x_i, w).
estimate ranges between 0 and 1.
'''
def predict_probability(feature_matrix, coefficients):
# Take dot product of feature_matrix and coefficients
# YOUR CODE HERE
score = np.dot(feature_matrix, coefficients) # N * 1
# Compute P(y_i = +1 | x_i, w) using the link function
# YOUR CODE HERE
predictions = 1.0/(1+np.exp(-score))
# return predictions
return predictions
'''
errors: N * 1
feature: N * 1
derivative: 1
'''
def feature_derivative(errors, feature):
# Compute the dot product of errors and feature
derivative = np.dot(np.transpose(errors), feature)
# Return the derivative
return derivative
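# For reference, the dot product above is the per-coefficient gradient of the
# log likelihood: d(ll)/dw_j = sum_i h_j(x_i) * (1[y_i = +1] - P(y_i = +1 | x_i, w)),
# where `errors` supplies the indicator-minus-prediction term.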
def compute_log_likelihood(feature_matrix, sentiment, coefficients):
indicator = (sentiment==+1)
scores = np.dot(feature_matrix, coefficients)
# scores.shape (53072L, 1L)
# indicator.shape (53072L,)
lp = np.sum((np.transpose(np.array([indicator]))-1)*scores - np.log(1. + np.exp(-scores)))
return lp
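# Rough sanity check: with all-zero coefficients every score is 0, so this function
# should return -N * log(2), i.e. about -0.693 per review, before any gradient steps.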
# coefficients: D * 1
from math import sqrt
def logistic_regression(feature_matrix, sentiment, initial_coefficients, step_size, max_iter):
coefficients = np.array(initial_coefficients) # make sure it's a numpy array
# lplist = []
for itr in xrange(max_iter):
# Predict P(y_i = +1|x_1,w) using your predict_probability() function
# YOUR CODE HERE
predictions = predict_probability(feature_matrix, coefficients)
# Compute indicator value for (y_i = +1)
indicator = (sentiment==+1)
# Compute the errors as indicator - predictions
errors = np.transpose(np.array([indicator])) - predictions
for j in xrange(len(coefficients)): # loop over each coefficient
# Recall that feature_matrix[:,j] is the feature column associated with coefficients[j]
# compute the derivative for coefficients[j]. Save it in a variable called derivative
# YOUR CODE HERE
derivative = feature_derivative(errors, feature_matrix[:,j])
# add the step size times the derivative to the current coefficient
# YOUR CODE HERE
coefficients[j] += step_size*derivative
# Checking whether log likelihood is increasing
if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \
or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:
# lplist.append(compute_log_likelihood(feature_matrix, sentiment, coefficients))
lp = compute_log_likelihood(feature_matrix, sentiment, coefficients)
print 'iteration %*d: log likelihood of observed labels = %.8f' % \
(int(np.ceil(np.log10(max_iter))), itr, lp)
    # The log-likelihood trace plot below is disabled because lplist is left
    # commented out above; uncomment both pieces to visualise convergence.
    # import matplotlib.pyplot as plt
    # x = [i for i in range(len(lplist))]
    # plt.plot(x, lplist, 'ro')
    # plt.show()
return coefficients
initial_coefficients = np.zeros((194,1))
step_size = 1e-7
max_iter = 301
coefficients = logistic_regression(feature_matrix, sentiment, initial_coefficients, step_size, max_iter)
'''
feature_matrix: N * D
coefficients: D * 1
predictions: N * 1
'''
predictions = predict_probability(feature_matrix, coefficients)
NumPositive = (predictions > 0.5).sum()
print NumPositive
score = np.dot(feature_matrix, coefficients) # N * 1
print (score > 0).sum()
print 0 in products['sentiment']
print -1 in products['sentiment']
print np.transpose(predictions.flatten()).shape
print (products['sentiment']).shape
print (np.transpose(predictions.flatten()))[:5]
correct_num = np.sum((np.transpose(predictions.flatten())> 0.5) == np.array(products['sentiment']>0))
total_num = len(products['sentiment'])
print "correct_num: {}, total_num: {}".format(correct_num, total_num)
accuracy = correct_num * 1./ total_num
print accuracy
np.transpose(predictions.flatten())> 0.5
np.array(products['sentiment']>0)
correct_num = np.sum((np.transpose(score.flatten())> 0) == np.array(products['sentiment']>0))
total_num = len(products['sentiment'])
print "correct_num: {}, total_num: {}".format(correct_num, total_num)
accuracy = correct_num * 1./ total_num
print accuracy
coefficients = list(coefficients[1:]) # exclude intercept
word_coefficient_tuples = [(word, coefficient) for word, coefficient in zip(important_words, coefficients)]
word_coefficient_tuples = sorted(word_coefficient_tuples, key=lambda x:x[1], reverse=True)
word_coefficient_tuples[:10]
word_coefficient_tuples[-10:]
print np.array([1,2,3])==np.array([1,3,2])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: T81-558
Step2: Define CNN
Step3: Training/Fitting CNN
Step4: Evaluate Accuracy
|
<ASSISTANT_TASK:>
Python Code:
Functions for downloading and reading MNIST data.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import tempfile
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
# Loading MNIST data
mnist = read_data_sets('MNIST_data')
import tensorflow.contrib.learn as skflow
from sklearn import datasets, metrics
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
def conv_model(X, y):
# reshape X to 4d tensor with 2nd as image width, 3rd dimension
# as image height, 4th dimension as color channels.
X = tf.reshape(X, [-1, 28, 28, 1])
# Conv Layer #1: 32 channels/neurons for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = skflow.ops.conv2d(X, n_filters=32, filter_shape=[5, 5],
bias=True, activation=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# second conv layer will compute 64 channels for each 5x5 patch
with tf.variable_scope('conv_layer2'):
h_conv2 = skflow.ops.conv2d(h_pool1, n_filters=64, filter_shape=[5, 5],
bias=True, activation=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# Reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# densely connected layer with 256 neurons
h_fc1 = skflow.ops.dnn(h_pool2_flat, [256], activation=tf.nn.relu)
return skflow.models.logistic_regression(h_fc1, y)
# To fit/train use either the simple train (this box) or the early stop (next box)
# Do not use both. This box is faster, but no early stop
classifier = skflow.TensorFlowEstimator(
model_fn=conv_model, n_classes=10, batch_size=100, steps=3000,
learning_rate=0.001)
classifier.fit(mnist.train.images, mnist.train.labels)
# Early stopping - WARNING, this is slow on Data Scientist Workbench
# Do not run both this (and previous box) Choose one or the other.
# Training and predicting
classifier = skflow.TensorFlowEstimator(
model_fn=conv_model, n_classes=10, batch_size=100, steps=500,
learning_rate=0.001)
early_stop = skflow.monitors.ValidationMonitor(mnist.validation.images,
mnist.validation.labels, n_classes=10,
early_stopping_rounds=200, print_steps=50)
# Fit/train neural network
classifier.fit(mnist.train.images, mnist.train.labels, monitor=early_stop)
from sklearn import metrics
# Evaluate success using accuracy
pred = classifier.predict(mnist.test.images)
score = metrics.accuracy_score(pred, mnist.test.labels)
print("Accuracy score: {}".format(score))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up our toy problem (1D optimisation of the Forrester function) and collect 3 initial points.
Step2: Fit our GP model to the observed data.
Step3: Let's plot the resulting acquisition functions for the chosen model on the collected data. Note that MES takes a fraction of the time of ES to compute (timings plotted on a log scale). This difference becomes even more apparent as you increase the dimensionality of the sample space.
|
<ASSISTANT_TASK:>
Python Code:
### General imports
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
import GPy
import time
### Emukit imports
from emukit.test_functions import forrester_function
from emukit.core.loop.user_function import UserFunctionWrapper
from emukit.core import ContinuousParameter, ParameterSpace
from emukit.bayesian_optimization.acquisitions import EntropySearch, ExpectedImprovement, MaxValueEntropySearch
from emukit.model_wrappers.gpy_model_wrappers import GPyModelWrapper
### --- Figure config
LEGEND_SIZE = 15
target_function, space = forrester_function()
x_plot = np.linspace(space.parameters[0].min, space.parameters[0].max, 200)[:, None]
y_plot = target_function(x_plot)
X_init = np.array([[0.2],[0.6], [0.9]])
Y_init = target_function(X_init)
plt.figure(figsize=(12, 8))
plt.plot(x_plot, y_plot, "k", label="Objective Function")
plt.scatter(X_init,Y_init)
plt.legend(loc=2, prop={'size': LEGEND_SIZE})
plt.xlabel(r"$x$")
plt.ylabel(r"$f(x)$")
plt.grid(True)
plt.xlim(0, 1)
plt.show()
gpy_model = GPy.models.GPRegression(X_init, Y_init, GPy.kern.RBF(1, lengthscale=0.08, variance=20), noise_var=1e-10)
emukit_model = GPyModelWrapper(gpy_model)
ei_acquisition = ExpectedImprovement(emukit_model)
es_acquisition = EntropySearch(emukit_model,space)
mes_acquisition = MaxValueEntropySearch(emukit_model,space)
t_0 = time.time()
ei_plot = ei_acquisition.evaluate(x_plot)
t_1 = time.time()
es_plot = es_acquisition.evaluate(x_plot)
t_2 = time.time()
mes_plot = mes_acquisition.evaluate(x_plot)
t_3 = time.time()
# elapsed wall-clock time for each acquisition function
t_ei = t_1 - t_0
t_es = t_2 - t_1
t_mes = t_3 - t_2
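# (added) report the measured timings; MES is typically far cheaper to evaluate than ES
print("EI: {:.3f}s, ES: {:.3f}s, MES: {:.3f}s".format(t_ei, t_es, t_mes))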
plt.figure(figsize=(12, 8))
plt.plot(x_plot, (es_plot - np.min(es_plot)) / (np.max(es_plot) - np.min(es_plot)), "green", label="Entropy Search")
plt.plot(x_plot, (ei_plot - np.min(ei_plot)) / (np.max(ei_plot) - np.min(ei_plot)), "blue", label="Expected Improvement")
plt.plot(x_plot, (mes_plot - np.min(mes_plot)) / (np.max(mes_plot) - np.min(mes_plot)), "red", label="Max Value Entropy Search")
plt.legend(loc=1, prop={'size': LEGEND_SIZE})
plt.xlabel(r"$x$")
plt.ylabel(r"$f(x)$")
plt.grid(True)
plt.xlim(0, 1)
plt.show()
plt.figure(figsize=(12, 8))
plt.bar(["ei","es","mes"],[t_ei,t_es,t_mes])
plt.xlabel("Acquisition Choice")
plt.yscale('log')
plt.ylabel("Calculation Time (secs)")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: CHANGE THE INITIAL VARIABLE THAT SHOWS THE SURVEY YEAR.
Step2: DEFINITION OF THE REGIONS AND THEIR CONVERSION INTO A CATEGORY;
Step3: SPLIT INTO RURAL AND URBAN ZONES, THE SECOND ANALYSIS VARIABLE
Step4: CREATION OF THE FOOD INSECURITY VARIABLE
Step5: CREATION OF THE "FOOD PROBLEM" VARIABLE
Step6: INITIAL FILTERING
Step7: TABLE 1 - 2013
Step8: TABLE 1 - 2009
Step9: FIRST OBSERVATION
Step10: A CLOSER LOOK AT THE REGIONS
Step11: In this case a clear consistency can be seen between the percentage and absolute figures, since the North and Northeast regions show both the highest frequency and the largest number of people who have already experienced food insecurity.
Step12: OBSERVATION OF THE SITUATION IN THE URBAN AND RURAL ZONES
Step13: DATA CROSS-TABULATION
Step14: ANALYSIS SEQUENCE FOR EACH YEAR
Step15: INITIAL ANALYSIS AND NEW FILTERING
Step16: TABLE 2 - 2013
Step17: TABLE 2 - 2009
Step18: Characterization of the food problems
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os
from numpy import zeros_like
print('Esperamos trabalhar no diretório')
print(os.getcwd())
base = pd.read_csv('DOM2013.csv',sep=',')
base9 = pd.read_csv('DOM2009.csv',sep=',')
base.V0101=base.V0101.astype("int")
base9.V0101=base9.V0101.astype("int")
base.loc[(base.UF<18),"REGIAO"]="NORTE"
base.loc[(base.UF>20)&(base.UF<30),"REGIAO"]="NORDESTE"
base.loc[(base.UF>30)&(base.UF<36),"REGIAO"]="SUDESTE"
base.loc[(base.UF>35)&(base.UF<44),"REGIAO"]="SUL"
base.loc[(base.UF>43)&(base.UF<54),"REGIAO"]="CENTRO-OESTE"
base.REGIAO=base.REGIAO.astype("category")
base9.loc[(base9.UF<18),"REGIAO"]="NORTE"
base9.loc[(base9.UF>20)&(base9.UF<30),"REGIAO"]="NORDESTE"
base9.loc[(base9.UF>30)&(base9.UF<36),"REGIAO"]="SUDESTE"
base9.loc[(base9.UF>35)&(base9.UF<44),"REGIAO"]="SUL"
base9.loc[(base9.UF>43)&(base9.UF<54),"REGIAO"]="CENTRO-OESTE"
base9.REGIAO=base9.REGIAO.astype("category")
base.loc[(base.V4105<4),"ZONA"]="Urbana"
base.loc[(base.V4105>3),"ZONA"]="Rural"
base.ZONA=base.ZONA.astype("category")
base9.loc[(base9.V4105<4),"ZONA"]="Urbana"
base9.loc[(base9.V4105>3),"ZONA"]="Rural"
base9.ZONA=base9.ZONA.astype("category")
base.loc[(base.V2103==1) | (base.V2105==1) | (base.V2107==1) | (base.V2109==1),'Insegurança_Alimentar'] = 'Sim'
base.loc[(base.V2103==3) & (base.V2105==3) & (base.V2107==3) & (base.V2109==3),'Insegurança_Alimentar'] = 'Não'
base.V2103=base.V2103.astype("category")
base.V2105=base.V2105.astype("category")
base.V2107=base.V2107.astype("category")
base.V2109=base.V2109.astype("category")
base9.loc[(base9.V2103==1) | (base9.V2105==1) | (base9.V2107==1) | (base9.V2109==1),'Insegurança_Alimentar'] = 'Sim'
base9.loc[(base9.V2103==3) & (base9.V2105==3) & (base9.V2107==3) & (base9.V2109==3),'Insegurança_Alimentar'] = 'Não'
base9.V2103=base9.V2103.astype("category")
base9.V2105=base9.V2105.astype("category")
base9.V2107=base9.V2107.astype("category")
base9.V2109=base9.V2109.astype("category")
base.loc[(base.V2113==1) | (base.V2115==1) | (base.V2117==1) | (base.V2121==1),'Problema_Alimentar'] = 'Sim'
base.loc[(base.V2113==3) & (base.V2115==3) & (base.V2117==3) & (base.V2121==3),'Problema_Alimentar'] = 'Não'
base.V2113=base.V2113.astype("category")
base.V2115=base.V2115.astype("category")
base.V2117=base.V2117.astype("category")
base.V2121=base.V2121.astype("category")
base9.loc[(base9.V2111==1) | (base9.V2113==1) | (base9.V2115==1) | (base9.V2117==1) | (base9.V2119==1) | (base9.V2120==1) | (base9.V2121==1),'Problema_Alimentar'] = 'Sim'
base9.loc[(base9.V2111==3) & (base9.V2113==3) & (base9.V2115==3) & (base9.V2117==3) & (base9.V2119==3) & (base9.V2120==3) & (base9.V2121==3),'Problema_Alimentar'] = 'Não'
base9.V2113=base9.V2113.astype("category")
base9.V2115=base9.V2115.astype("category")
base9.V2117=base9.V2117.astype("category")
base9.V2117=base9.V2119.astype("category")
base9.V2121=base9.V2120.astype("category")
base9.V2121=base9.V2121.astype("category")
base=base.loc[:,["V0101","REGIAO","ZONA","V4614",'Insegurança_Alimentar',"Problema_Alimentar"]]
base.columns=["ANO","REGIAO","ZONA","RENDA",'Insegurança_Alimentar',"Problema_Alimentar"]
base=base.dropna(subset=["RENDA","Insegurança_Alimentar"])
base
writer = pd.ExcelWriter('Tabela1-2013.xlsx',engine='xlsxwriter')
base.to_excel(writer,sheet_name="Projeto_1")
writer.save()
base9=base9.loc[:,["V0101","REGIAO","ZONA","V4614",'Insegurança_Alimentar',"Problema_Alimentar"]]
base9.columns=["ANO","REGIAO","ZONA","RENDA",'Insegurança_Alimentar',"Problema_Alimentar"]
base9=base9.dropna(subset=["RENDA","Insegurança_Alimentar"])
base9
writer = pd.ExcelWriter('Tabela1-2009.xlsx',engine='xlsxwriter')
base9.to_excel(writer,sheet_name="Projeto_1")
writer.save()
g1 = (base.Insegurança_Alimentar.value_counts(sort=False, normalize=True)*100).round(decimals=1)
plot = g1.plot(kind='bar',title='DIFICULDADE ALIMENTAR 2013 (G1)',figsize=(5, 5),color=('b','g'))
print(g1,"\n")
g2 = (base9.Insegurança_Alimentar.value_counts(sort=False, normalize=True)*100).round(decimals=1)
plot = g2.plot(kind='bar',title='DIFICULDADE ALIMENTAR 2009 (G2)',figsize=(5, 5),color=('b','g'))
print(g2,"\n")
tb1= (pd.crosstab(base.REGIAO,base.Insegurança_Alimentar,margins=True,rownames=["REGIÃO"],colnames=["Insegurança Alimentar"],normalize='index')*100).round(decimals=1)
plot = tb1.plot(kind="bar",title="Distribuição Regional de Insegurança Alimentar 2013 (G3)")
abs1=pd.crosstab(base.REGIAO,base.Insegurança_Alimentar, margins=True, rownames=['REGIÃO'], colnames=['INSEGURANÇA ALIMENTAR'])
abs1=abs1.loc[['NORTE','NORDESTE','SUDESTE','SUL','CENTRO-OESTE']]
abs1
tb19= (pd.crosstab(base9.REGIAO,base9.Insegurança_Alimentar,margins=True,rownames=["REGIÃO"],colnames=["Insegurança Alimentar"],normalize='index')*100).round(decimals=1)
plot = tb19.plot(kind="bar",title="Distribuição Regional de Insegurança Alimentar 2009 (G4)")
abs19=pd.crosstab(base9.REGIAO,base9.Insegurança_Alimentar, margins=True, rownames=['REGIÃO'], colnames=['INSEGURANÇA ALIMENTAR'])
abs19=abs19.loc[['NORTE','NORDESTE','SUDESTE','SUL','CENTRO-OESTE']]
abs19
tb2 = (pd.crosstab(base.ZONA,base.Insegurança_Alimentar,margins=True,rownames=["ZONA"],colnames=["Insegurança Alimentar"],normalize='index')*100).round(decimals=1)
plot = tb2.plot(kind="bar",title="Distribuição em Zonas de Insegurança Alimentar 2013 (G5)")
abs2=pd.crosstab(base.ZONA,base.Insegurança_Alimentar, margins=True, rownames=['ZONA'], colnames=['INSEGURANÇA ALIMENTAR'])
abs2=abs2.loc[['Rural','Urbana']]
abs2
tb29 = (pd.crosstab(base9.ZONA,base9.Insegurança_Alimentar,margins=True,rownames=["ZONA"],colnames=["Insegurança Alimentar"],normalize='index')*100).round(decimals=1)
plot = tb29.plot(kind="bar",title="Distribuição em Zonas de Insegurança Alimentar 2009 (G6)")
abs29=pd.crosstab(base9.ZONA,base9.Insegurança_Alimentar, margins=True, rownames=['ZONA'], colnames=['INSEGURANÇA ALIMENTAR'])
abs29=abs29.loc[['Rural','Urbana']]
abs29
ct1=(pd.crosstab([base.REGIAO, base.ZONA],base.Insegurança_Alimentar, normalize='index')*100).round(decimals=1)
ct1
print(ct1,'\n')
plot = ct1.plot(kind='bar',title="Análise de Insegurança Alimentar 2013 (G7)")
ax = plt.subplot(111)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('Freq.Relativa (em %)')
plt.show()
ct2=(pd.crosstab([base9.REGIAO, base9.ZONA],base9.Insegurança_Alimentar, normalize='index')*100).round(decimals=1)
ct2
print(ct2,'\n')
plot = ct2.plot(kind='bar',title="Análise de Insegurança Alimentar 2009 (G8)")
ax = plt.subplot(111)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('Freq.Relativa (em %)')
plt.show()
faixa = np.arange(0,7350,350)
frenda = pd.cut(base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO=="NORTE")], bins=faixa, right=False)
t1 = (frenda.value_counts(sort=False, normalize=True)*100).round(decimals=1)
print(t1,"\n")
plot = base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO=="NORTE")].plot.hist(bins=faixa,title="Histograma - Insegurança Alimentar - NORTE - 2013 (H1)", weights=zeros_like(base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO=="NORTE")])+1./base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO=="NORTE")].size*100, figsize=(6, 6), alpha=0.5)
plt.ylabel('Frequência relativa (em %)')
plt.xlabel('Renda (em reais)')
plt.show()
faixa = np.arange(0,7350,350)
frenda2 = pd.cut(base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO=="NORDESTE")], bins=faixa, right=False)
t2 = (frenda2.value_counts(sort=False, normalize=True)*100).round(decimals=1)
print(t2,"\n")
plot = base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO=="NORDESTE")].plot.hist(bins=faixa,title="Histograma - Insegurança Alimentar - NORDESTE - 2013(H2)", weights=zeros_like(base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO=="NORDESTE")])+1./base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO=="NORDESTE")].size*100, figsize=(6, 6), alpha=0.5,color="red")
plt.ylabel('Frequência relativa (em %)')
plt.xlabel('Renda (em reais)')
plt.show()
frenda9 = pd.cut(base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base9.REGIAO=="CENTRO-OESTE")], bins=faixa, right=False)
t19 = (frenda9.value_counts(sort=False, normalize=True)*100).round(decimals=1)
print(t19,"\n")
plot = base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base9.REGIAO=="CENTRO-OESTE")].plot.hist(bins=faixa,title="Histograma - Insegurança Alimentar - CENTRO-OESTE - 2009(H3)", weights=zeros_like(base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base9.REGIAO=="CENTRO-OESTE")])+1./base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base9.REGIAO=="CENTRO-OESTE")].size*100, figsize=(6, 6), alpha=0.5,color="chocolate")
plt.ylabel('Frequência relativa (em %)')
plt.xlabel('Renda (em reais)')
plt.show()
frenda29 = pd.cut(base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base9.REGIAO=="NORDESTE")], bins=faixa, right=False)
t29 = (frenda29.value_counts(sort=False, normalize=True)*100).round(decimals=1)
print(t29,"\n")
plot = base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base9.REGIAO=="NORDESTE")].plot.hist(bins=faixa,title="Histograma - Insegurança Alimentar - NORDESTE - 2009(H4)", weights=zeros_like(base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base9.REGIAO=="NORDESTE")])+1./base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base9.REGIAO=="NORDESTE")].size*100, figsize=(6, 6), alpha=0.5,color="darkslategray")
plt.ylabel('Frequência relativa (em %)')
plt.xlabel('Renda (em reais)')
plt.show()
base=base[(base.Insegurança_Alimentar=="Sim")]
base
writer = pd.ExcelWriter('Tabela2-2013.xlsx',engine='xlsxwriter')
base.to_excel(writer,sheet_name="Projeto_1")
writer.save()
base9=base9[(base9.Insegurança_Alimentar=="Sim")]
base9
writer = pd.ExcelWriter('Tabela2-2009.xlsx',engine='xlsxwriter')
base9.to_excel(writer,sheet_name="Projeto_1")
writer.save()
frenda3 = pd.cut(base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO=="NORTE")], bins=faixa, right=False)
t3 = (frenda3.value_counts(sort=False, normalize=True)*100).round(decimals=1)
print(t3,"\n")
plot = base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO=="NORTE")].plot.hist(bins=faixa,title="Problema Alimentar - NORTE - 2013 (H5)", weights=zeros_like(base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO=="NORTE")])+1./base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO=="NORTE")].size*100, figsize=(6, 6), alpha=0.5,color="purple")
plt.ylabel('Frequência relativa (em %)')
plt.xlabel('Renda (em reais)')
plt.show()
frenda4 = pd.cut(base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO=="NORDESTE")], bins=faixa, right=False)
t4 = (frenda4.value_counts(sort=False, normalize=True)*100).round(decimals=1)
print(t4,"\n")
plot = base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO=="NORDESTE")].plot.hist(bins=faixa,title="Problema Alimentar - NORDESTE - 2013(H6)", weights=zeros_like(base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO=="NORDESTE")])+1./base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO=="NORDESTE")].size*100, figsize=(6, 6), alpha=0.5,color="darkgreen")
plt.ylabel('Frequência relativa (em %)')
plt.xlabel('Renda (em reais)')
plt.show()
frenda39 = pd.cut(base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base9.REGIAO=="CENTRO-OESTE")], bins=faixa, right=False)
t39 = (frenda39.value_counts(sort=False, normalize=True)*100).round(decimals=1)
print(t39,"\n")
plot = base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base9.REGIAO=="CENTRO-OESTE")].plot.hist(bins=faixa,title="Problema Alimentar - CENTRO-OESTE - 2009(H7)", weights=zeros_like(base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base9.REGIAO=="CENTRO-OESTE")])+1./base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base9.REGIAO=="CENTRO-OESTE")].size*100, figsize=(6, 6), alpha=0.5,color="black")
plt.ylabel('Frequência relativa (em %)')
plt.xlabel('Renda (em reais)')
plt.show()
frenda49 = pd.cut(base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base9.REGIAO=="NORDESTE")], bins=faixa, right=False)
t49 = (frenda49.value_counts(sort=False, normalize=True)*100).round(decimals=1)
print(t49,"\n")
plot = base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base9.REGIAO=="NORDESTE")].plot.hist(bins=faixa,title="Problema Alimentar - NORDESTE - 2009(H8) ", weights=zeros_like(base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base9.REGIAO=="NORDESTE")])+1./base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base9.REGIAO=="NORDESTE")].size*100, figsize=(6, 6), alpha=0.5,color="orange")
plt.ylabel('Frequência relativa (em %)')
plt.xlabel('Renda (em reais)')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Useful information about the data
Step2: Data by agglomeration
Step3: Paris intra-muros
Step4: Remark
Step5: Paris region
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
#%matplotlib notebook
import matplotlib
matplotlib.rcParams['figure.figsize'] = (9, 9)
import pandas as pd
url = "https://www.data.gouv.fr/fr/datasets/r/1fee314d-c278-424f-a029-a74d877eb185"
df2016 = pd.read_csv(url,
encoding='iso-8859-1',
sep=';',
decimal=',')
url = "https://www.data.gouv.fr/fr/datasets/r/15d902ed-4dc3-457d-9c5d-bfe1151cb573"
df2015 = pd.read_csv(url,
encoding='iso-8859-1',
sep=';',
decimal=',')
url = "https://www.data.gouv.fr/fr/datasets/r/42aaf838-46c9-4434-95a9-00173c6d4627"
df2014 = pd.read_csv(url,
encoding='iso-8859-1',
sep=';',
decimal=',')
frames = [df2014, df2015, df2016]
df = pd.concat(frames, ignore_index=True)
df.shape
df.columns
#df.Observatory.value_counts().plot.barh()
df.agglomeration.value_counts().plot.barh()
dfp = df[df.agglomeration == "Paris intra-muros"]
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(10, 5))
dfp[dfp.Data_year == 2014].Type_habitat.value_counts().plot.pie(y="Type_habitat", ax=ax1)
dfp[dfp.Data_year == 2015].Type_habitat.value_counts().plot.pie(y="Type_habitat", ax=ax2)
#dfp[dfp.Data_year == 2016].Type_habitat.value_counts().plot.pie(y="Type_habitat", ax=ax3)
dfp[dfp.Data_year == 2014].plot.scatter(x="moyenne_loyer_mensuel",
y="surface_moyenne",
s=dfp[dfp.Data_year == 2014].nombre_obsservations,
alpha=0.1)
dfp[dfp.Data_year == 2015].plot.scatter(x="moyenne_loyer_mensuel",
y="surface_moyenne",
#s=dfp[dfp.Data_year == 2015].nombre_obsservations,
#c="blue",
alpha=0.5)
dfp = df[df.agglomeration == "Agglomération parisienne"]
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(10, 5))
dfp[dfp.Data_year == 2014].Type_habitat.value_counts().plot.pie(y="Type_habitat", ax=ax1)
dfp[dfp.Data_year == 2015].Type_habitat.value_counts().plot.pie(y="Type_habitat", ax=ax2)
dfp[dfp.Data_year == 2014].plot.scatter(x="moyenne_loyer_mensuel",
y="surface_moyenne",
#c=dfp[dfp.Data_year == 2014].nombre_obsservations,
alpha=0.5)
#cmap="Blues")
dfp[dfp.Data_year == 2015].plot.scatter(x="moyenne_loyer_mensuel",
y="surface_moyenne",
#s=dfp[dfp.Data_year == 2015].nombre_obsservations,
#c="blue",
alpha=0.5)
dfp = df[df.agglomeration == "Agglomération parisienne (hors Paris)"]
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(10, 5))
dfp[dfp.Data_year == 2014].Type_habitat.value_counts().plot.pie(y="Type_habitat", ax=ax1)
dfp[dfp.Data_year == 2015].Type_habitat.value_counts().plot.pie(y="Type_habitat", ax=ax2)
dfp[dfp.Data_year == 2014].plot.scatter(x="moyenne_loyer_mensuel",
y="surface_moyenne",
#c=dfp[dfp.Data_year == 2014].nombre_obsservations,
alpha=0.5)
#cmap="Blues")
dfp[dfp.Data_year == 2015].plot.scatter(x="moyenne_loyer_mensuel",
y="surface_moyenne",
#s=dfp[dfp.Data_year == 2015].nombre_obsservations,
#c="blue",
alpha=0.5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 5.5 Implementing the activation function layers
Step2: Sigmoid
Step3: 5.6 Implementing the Affine / Softmax layers
Step4: 5.6.3 The softmax-with-loss layer
Step5: 5.7 Backpropagation
Step6: Implementing the training loop
Step7: 6. Techniques related to training
Step8: 6.1.4 Momentum
Step9: 6.1.5 AdaGrad
Step10: 6.1.6 Adam
Step11: Xavier initial values
Step12: 6.3 Batch normalization
Step18: 7.4.2 Expanding the data with im2col
|
<ASSISTANT_TASK:>
Python Code:
class MulLayer:
def __init__(self):
self.x = None
self.y = None # 순전파시의 입력 값을 유지하기 위해 사용
def forward(self, x, y):
self.x = x
self.y = y
out = x * y
return out
def backward(self, dout):
dx = dout * self.y
dy = dout * self.x # 상류에서 넘어온 미분에 순전파 값을 바꿔 곱해서 하류로 흘림
return dx, dy
apple = 100
apple_num = 2
tax = 1.1
mul_apple_layer = MulLayer()
mul_tax_layer = MulLayer()
apple_price = mul_apple_layer.forward(apple, apple_num)
apple_price
price = mul_tax_layer.forward(apple_price, tax)
print(price)
# 역전파
dprice = 1
dapple_price, dtax = mul_tax_layer.backward(dprice)
dapple_price
dtax
dapple,dapple_num = mul_apple_layer.backward(dapple_price)
dapple
dapple_num
dtax
class AddLayer:
def __init__(self):
pass
def forward(self, x, y):
out = x + y
return out
def backward(self, dout):
dx = dout * 1
dy = dout * 1
return dx, dy
apple = 100
apple_num = 2
orange = 150
orange_num = 3
tax = 1.1
# layer
mul_apple_layer = MulLayer()
mul_orange_layer = MulLayer()
add_apple_orange_layer = AddLayer()
mul_tax_layer = MulLayer()
# forward
apple_price = mul_apple_layer.forward(apple, apple_num) # (1)
orange_price = mul_orange_layer.forward(orange, orange_num) # (2)
all_price = add_apple_orange_layer.forward(apple_price, orange_price) # (3)
price = mul_tax_layer.forward(all_price, tax) # (4)
# backward
dprice = 1
dall_price, dtax = mul_tax_layer.backward(dprice) # (4)
dapple_price, dorange_price = add_apple_orange_layer.backward(dall_price) # (3)
dorange, dorange_num = mul_orange_layer.backward(dorange_price) # (2)
dapple, dapple_num = mul_apple_layer.backward(dapple_price) # (1)
print("price:", int(price))
print("dApple:", dapple)
print("dApple_num:", int(dapple_num))
print("dOrange:", dorange)
print("dOrange_num:", int(dorange_num))
print("dTax:", dtax)
class Relu:
def __init__(self):
self.mask = None
def forward(self, x):
self.mask = (x <= 0)
out = x.copy()
out[self.mask] = 0
return out
def backward(self, dout):
dout[self.mask] = 0
dx = dout
return dx
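# --- added illustration: a quick numeric check of the Relu layer (not from the original notebook) ---
import numpy as np
_x = np.array([[1.0, -0.5], [-2.0, 3.0]])
_relu = Relu()
print(_relu.forward(_x))                  # negative entries are clipped to 0
print(_relu.backward(np.ones_like(_x)))   # upstream gradient is blocked wherever x <= 0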
class Sigmoid:
def __init__(self):
self.out = None
def forward(self, x):
out = 1 / (1 + np.exp(-x))
self.out = out
return out
    def backward(self, dout):
dx = dout * (1.0 - self.out) * self.out
return dx
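# note (added): Sigmoid.backward implements dL/dx = dL/dy * y * (1 - y),
# which is why only the cached forward output self.out is needed.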
class Affine:
def __init__(self, W, b):
self.W = W
self.b = b
self.x = None
self.dW = None
self.db = None
def forward(self, x):
self.x = x
out = np.dot(x, self.W) + self.b
return out
def backward(self, dout):
dx = np.dot(dout, self.W.T)
self.dW = np.dot(self.x.T, dout)
self.db = np.sum(dout, axis=0)
return dx
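# --- added illustration: shape check for the Affine layer (assumes numpy is imported as np) ---
_W = np.random.randn(3, 2)
_b = np.zeros(2)
_affine = Affine(_W, _b)
_out = _affine.forward(np.random.randn(4, 3))   # (4, 3) x (3, 2) -> (4, 2)
_dx = _affine.backward(np.ones_like(_out))      # dx: (4, 3), dW: (3, 2), db: (2,)
print(_out.shape, _dx.shape, _affine.dW.shape, _affine.db.shape)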
class SoftmaxWithLoss:
def __init__(self):
self.loss = None
self.y = None
self.t = None
def forward(self, x, t):
self.t = t
self.y = softmax(x)
self.loss = cross_entropy_error(self.y, self.t)
return self.loss
def backward(self, dout=1):
batch_size = self.t.shape[0]
dx = (self.y - self.t) / batch_size
return dx
# coding: utf-8
import sys, os
sys.path.append(os.pardir) # 부모 디렉터리의 파일을 가져올 수 있도록 설정
import numpy as np
from common.layers import *
from common.gradient import numerical_gradient
from collections import OrderedDict
class TwoLayerNet:
def __init__(self, input_size, hidden_size, output_size, weight_init_std = 0.01):
# 가중치 초기화
self.params = {}
self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
# 계층 생성
self.layers = OrderedDict()
self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
self.layers['Relu1'] = Relu()
self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])
self.lastLayer = SoftmaxWithLoss()
def predict(self, x):
for layer in self.layers.values():
x = layer.forward(x)
return x
# x : 입력 데이터, t : 정답 레이블
def loss(self, x, t):
y = self.predict(x)
return self.lastLayer.forward(y, t)
def accuracy(self, x, t):
y = self.predict(x)
y = np.argmax(y, axis=1)
if t.ndim != 1 : t = np.argmax(t, axis=1)
accuracy = np.sum(y == t) / float(x.shape[0])
return accuracy
# x : 입력 데이터, t : 정답 레이블
def numerical_gradient(self, x, t):
loss_W = lambda W: self.loss(x, t)
grads = {}
grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
return grads
def gradient(self, x, t):
# forward
self.loss(x, t)
# backward
dout = 1
dout = self.lastLayer.backward(dout)
layers = list(self.layers.values())
layers.reverse()
for layer in layers:
dout = layer.backward(dout)
# 결과 저장
grads = {}
grads['W1'], grads['b1'] = self.layers['Affine1'].dW, self.layers['Affine1'].db
grads['W2'], grads['b2'] = self.layers['Affine2'].dW, self.layers['Affine2'].db
return grads
sys.path.append(os.pardir) # 부모 디렉터리의 파일을 가져올 수 있도록 설정
from dataset.mnist import load_mnist
# from two_layer_net import TwoLayerNet
# 데이터 읽기
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
x_batch = x_train[:3]
t_batch = t_train[:3]
grad_numerical = network.numerical_gradient(x_batch, t_batch)
grad_backprop = network.gradient(x_batch, t_batch)
# 각 가중치의 절대 오차의 평균을 구한다.
for key in grad_numerical.keys():
diff = np.average( np.abs(grad_backprop[key] - grad_numerical[key]) )
print(key + ":" + str(diff))
# 데이터 읽기
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
iters_num = 10000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1
train_loss_list = []
train_acc_list = []
test_acc_list = []
iter_per_epoch = max(train_size / batch_size, 1)
for i in range(iters_num):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
# 기울기 계산
#grad = network.numerical_gradient(x_batch, t_batch) # 수치 미분 방식
grad = network.gradient(x_batch, t_batch) # 오차역전파법 방식(훨씬 빠르다)
# 갱신
for key in ('W1', 'b1', 'W2', 'b2'):
network.params[key] -= learning_rate * grad[key]
loss = network.loss(x_batch, t_batch)
train_loss_list.append(loss)
if i % iter_per_epoch == 0:
train_acc = network.accuracy(x_train, t_train)
test_acc = network.accuracy(x_test, t_test)
train_acc_list.append(train_acc)
test_acc_list.append(test_acc)
print(train_acc, test_acc)
class SGD:
def __init__(self, lr=0.01):
self.lr = lr
def update(self, params, grads):
for key in params.keys():
params[key] -= self.lr * grads[key]
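# --- added illustration: minimal SGD usage on a toy parameter dict (assumes numpy as np) ---
_params = {'W': np.array([1.0, 2.0])}
_grads = {'W': np.array([0.5, 0.5])}
SGD(lr=0.1).update(_params, _grads)
print(_params['W'])   # -> [0.95 1.95]: each parameter moves against its gradient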
class Momentum:
def __init__(self, lr=0.01, momentum=0.9):
self.lr = lr
self.momentum = momentum
self.v = None
def update(self, params, grads):
if self.v is None:
self.v = {}
for key, val in params.items():
self.v[key] = np.zeros_like(val)
for key in params.keys():
self.v[key] = self.momentum*self.v[key] - self.lr*grads[key]
params[key] += self.v[key]
# 인스턴스 변수 v가 물체의 속도.
# SGD와 비교하면 지그재그 정도가 덜함
class AdaGrad:
def __init__(self, lr=0.01):
self.lr = lr
self.h = None
def update(self, params, grads):
if self.h is None:
self.h = {}
for key, val in params.items():
self.h[key] = np.zeros_like(val)
        for key in params.keys():
self.h[key] += grads[key] * grads[key]
params[key] -= self.lr * grads[key] / (np.sqrt(self.h[key]) + 1e-7)
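# --- added sketch of the Adam update mentioned in 6.1.6 (illustrative; not the notebook's own code) ---
class Adam:
    def __init__(self, lr=0.001, beta1=0.9, beta2=0.999):
        self.lr = lr
        self.beta1 = beta1
        self.beta2 = beta2
        self.iter = 0
        self.m = None   # first moment (running mean of gradients)
        self.v = None   # second moment (running mean of squared gradients)

    def update(self, params, grads):
        if self.m is None:
            self.m, self.v = {}, {}
            for key, val in params.items():
                self.m[key] = np.zeros_like(val)
                self.v[key] = np.zeros_like(val)

        self.iter += 1
        # bias-corrected effective learning rate
        lr_t = self.lr * np.sqrt(1.0 - self.beta2**self.iter) / (1.0 - self.beta1**self.iter)

        for key in params.keys():
            self.m[key] += (1 - self.beta1) * (grads[key] - self.m[key])
            self.v[key] += (1 - self.beta2) * (grads[key]**2 - self.v[key])
            params[key] -= lr_t * self.m[key] / (np.sqrt(self.v[key]) + 1e-7)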
import matplotlib.pyplot as plt
%matplotlib inline
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def ReLU(x):
return np.maximum(0, x)
def tanh(x):
return np.tanh(x)
input_data = np.random.randn(1000, 100) # 1000개의 데이터
node_num = 100 # 각 은닉층의 노드(뉴런) 수
hidden_layer_size = 5 # 은닉층이 5개
activations = {} # 이곳에 활성화 결과를 저장
x = input_data
for i in range(hidden_layer_size):
if i != 0:
x = activations[i-1]
# 초깃값을 다양하게 바꿔가며 실험해보자!
# w = np.random.randn(node_num, node_num) * 1
# w = np.random.randn(node_num, node_num) * 0.01
w = np.random.randn(node_num, node_num) * np.sqrt(1.0 / node_num)
# w = np.random.randn(node_num, node_num) * np.sqrt(2.0 / node_num)
a = np.dot(x, w)
# 활성화 함수도 바꿔가며 실험해보자!
z = sigmoid(a)
# z = ReLU(a)
# z = tanh(a)
activations[i] = z
# 히스토그램 그리기
for i, a in activations.items():
plt.subplot(1, len(activations), i+1)
plt.title(str(i+1) + "-layer")
if i != 0: plt.yticks([], [])
# plt.xlim(0.1, 1)
# plt.ylim(0, 7000)
plt.hist(a.flatten(), 30, range=(0,1))
plt.show()
# (가중치의 표준 편차:1) 데이터가 0과 1에 치우쳐 분포하게 되면 역전파의 기울기 값이 점점 작아지다 사라짐 -> 기울기 소실 (gradient vanishing)
# (가중치의 표준 편차:0.01) 데이터가 0.5 부근에 집중. 치우치는 경우 표현력을 제한하는 관점에서 문제.
from common.util import smooth_curve
from common.multi_layer_net import MultiLayerNet
from common.optimizer import SGD
# 0. MNIST 데이터 읽기==========
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
train_size = x_train.shape[0]
batch_size = 128
max_iterations = 2000
# 1. 실험용 설정==========
weight_init_types = {'std=0.01': 0.01, 'Xavier': 'sigmoid', 'He': 'relu'}
optimizer = SGD(lr=0.01)
networks = {}
train_loss = {}
for key, weight_type in weight_init_types.items():
networks[key] = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100],
output_size=10, weight_init_std=weight_type)
train_loss[key] = []
# 2. 훈련 시작==========
for i in range(max_iterations):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
for key in weight_init_types.keys():
grads = networks[key].gradient(x_batch, t_batch)
optimizer.update(networks[key].params, grads)
loss = networks[key].loss(x_batch, t_batch)
train_loss[key].append(loss)
if i % 100 == 0:
print("===========" + "iteration:" + str(i) + "===========")
for key in weight_init_types.keys():
loss = networks[key].loss(x_batch, t_batch)
print(key + ":" + str(loss))
# 3. 그래프 그리기==========
markers = {'std=0.01': 'o', 'Xavier': 's', 'He': 'D'}
x = np.arange(max_iterations)
for key in weight_init_types.keys():
plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key], markevery=100, label=key)
plt.xlabel("iterations")
plt.ylabel("loss")
plt.ylim(0, 2.5)
plt.legend()
plt.show()
import numpy as np
x = np.random.rand(10, 1, 28, 28)
x.shape
# 10개중 첫번째 데이터에 접근할 경우
x[0].shape # (1, 28, 28)
x[1].shape
# 첫 데이터의 첫 채널 공간에 접근할 경우
x[0, 0] # 또는 x[0][0]
from common.util import im2col
# input_data : (데이터 수, 채널 수, 높이, 너비)
# filter_h - 필터의 높이
# filter_w - 필터의 너비
# stride - 스트라이드
# pad - 패딩
import sys, os
sys.path.append(os.pardir)
x1 = np.random.rand(1, 3, 7, 7)
col1 = im2col(x1, 5, 5, stride=1, pad=0)
print(col1.shape)
x2 = np.random.rand(10, 3, 7, 7)
col2 = im2col(x2, 5, 5, stride=1, pad=0)
print(col2.shape)
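# note (added): each row of the im2col output is one flattened receptive field,
# so col1 is (9, 75)  = 3*3 output positions x (3 channels * 5 * 5 filter pixels)
# and col2 is (90, 75) = the same 9 positions stacked for all 10 input images.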
class Convolution:
def __init__(self, W, b, stride=1, pad=0):
self.W = W
self.b = b
self.stride = stride
self.pad = pad
def forward(self, x):
FN, C, FH, FW = self.W.shape
N, C, H, W = x.shape
out_h = int(1 + (H + 2*self.pad - FH) / self.stride)
out_w = int(1 + (W + 2*self.pad - FW) / self.stride)
col = im2col(x, FH, FW, self.stride, self.pad)
col_W = self.W.reshape(FN, -1).T # 필터 전개
out = np.dot(col, col_W) + self.b
out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)
return out
# reshape에 -1을 지정하면 다차원 배열의 원소 수가 변환 후에도 똑같이 유지되도록 적절히 묶어줌!
class Pooling:
def __init__(self, pool_h, pool_w, stride=1, pad=0):
self.pool_h = pool_h
self.pool_w = pool_w
self.stride = stride
self.pad = pad
def forward(self, x):
N, C, H, W = x.shape
out_h = int(1 + ( H - self.pool_h) / self.stride )
out_w = int(1 + ( W - self.pool_w) / self.stride )
col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
col = col.reshape(-1, self.pool_h*self.pool_w)
out = np.max(col, axis=1)
out = out.reshape(N, out_h, out_w, C).transpose(0, 3,1,2)
return out
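# note (added): the pooling forward pass also relies on im2col -- every row holds one
# pool_h x pool_w window, np.max(..., axis=1) takes the largest value per window,
# and the result is reshaped back to (N, C, out_h, out_w).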
class SimpleConvNet:
단순한 합성곱 신경망
conv - relu - pool - affine - relu - affine - softmax
Parameters
----------
input_size : 입력 크기(MNIST의 경우엔 784)
hidden_size_list : 각 은닉층의 뉴런 수를 담은 리스트(e.g. [100, 100, 100])
output_size : 출력 크기(MNIST의 경우엔 10)
activation : 활성화 함수 - 'relu' 혹은 'sigmoid'
weight_init_std : 가중치의 표준편차 지정(e.g. 0.01)
'relu'나 'he'로 지정하면 'He 초깃값'으로 설정
'sigmoid'나 'xavier'로 지정하면 'Xavier 초깃값'으로 설정
def __init__(self, input_dim=(1, 28, 28),
conv_param={'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1},
hidden_size=100, output_size=10, weight_init_std=0.01):
filter_num = conv_param['filter_num']
filter_size = conv_param['filter_size']
filter_pad = conv_param['pad']
filter_stride = conv_param['stride']
input_size = input_dim[1]
conv_output_size = (input_size - filter_size + 2*filter_pad) / filter_stride + 1
pool_output_size = int(filter_num * (conv_output_size/2) * (conv_output_size/2))
# 가중치 초기화
self.params = {}
self.params['W1'] = weight_init_std * \
np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
self.params['b1'] = np.zeros(filter_num)
self.params['W2'] = weight_init_std * \
np.random.randn(pool_output_size, hidden_size)
self.params['b2'] = np.zeros(hidden_size)
self.params['W3'] = weight_init_std * \
np.random.randn(hidden_size, output_size)
self.params['b3'] = np.zeros(output_size)
# 계층 생성
self.layers = OrderedDict()
self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'],
conv_param['stride'], conv_param['pad'])
self.layers['Relu1'] = Relu()
self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
self.layers['Relu2'] = Relu()
self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
self.last_layer = SoftmaxWithLoss()
def predict(self, x):
for layer in self.layers.values():
x = layer.forward(x)
return x
def loss(self, x, t):
손실 함수를 구한다.
Parameters
----------
x : 입력 데이터
t : 정답 레이블
y = self.predict(x)
return self.last_layer.forward(y, t)
def accuracy(self, x, t, batch_size=100):
if t.ndim != 1 : t = np.argmax(t, axis=1)
acc = 0.0
for i in range(int(x.shape[0] / batch_size)):
tx = x[i*batch_size:(i+1)*batch_size]
tt = t[i*batch_size:(i+1)*batch_size]
y = self.predict(tx)
y = np.argmax(y, axis=1)
acc += np.sum(y == tt)
return acc / x.shape[0]
def numerical_gradient(self, x, t):
기울기를 구한다(수치미분).
Parameters
----------
x : 입력 데이터
t : 정답 레이블
Returns
-------
각 층의 기울기를 담은 사전(dictionary) 변수
grads['W1']、grads['W2']、... 각 층의 가중치
grads['b1']、grads['b2']、... 각 층의 편향
loss_w = lambda w: self.loss(x, t)
grads = {}
for idx in (1, 2, 3):
grads['W' + str(idx)] = numerical_gradient(loss_w, self.params['W' + str(idx)])
grads['b' + str(idx)] = numerical_gradient(loss_w, self.params['b' + str(idx)])
return grads
def gradient(self, x, t):
기울기를 구한다(오차역전파법).
Parameters
----------
x : 입력 데이터
t : 정답 레이블
Returns
-------
각 층의 기울기를 담은 사전(dictionary) 변수
grads['W1']、grads['W2']、... 각 층의 가중치
grads['b1']、grads['b2']、... 각 층의 편향
# forward
self.loss(x, t)
# backward
dout = 1
dout = self.last_layer.backward(dout)
layers = list(self.layers.values())
layers.reverse()
for layer in layers:
dout = layer.backward(dout)
# 결과 저장
grads = {}
grads['W1'], grads['b1'] = self.layers['Conv1'].dW, self.layers['Conv1'].db
grads['W2'], grads['b2'] = self.layers['Affine1'].dW, self.layers['Affine1'].db
grads['W3'], grads['b3'] = self.layers['Affine2'].dW, self.layers['Affine2'].db
return grads
def save_params(self, file_name="params.pkl"):
params = {}
for key, val in self.params.items():
params[key] = val
with open(file_name, 'wb') as f:
pickle.dump(params, f)
def load_params(self, file_name="params.pkl"):
with open(file_name, 'rb') as f:
params = pickle.load(f)
for key, val in params.items():
self.params[key] = val
for i, key in enumerate(['Conv1', 'Affine1', 'Affine2']):
self.layers[key].W = self.params['W' + str(i+1)]
self.layers[key].b = self.params['b' + str(i+1)]
import pickle
import numpy as np
from collections import OrderedDict
from common.layers import *
from common.gradient import numerical_gradient
import numpy as np
import matplotlib.pyplot as plt
# from simple_convnet import SimpleConvNet
def filter_show(filters, nx=8, margin=3, scale=10):
c.f. https://gist.github.com/aidiary/07d530d5e08011832b12#file-draw_weight-py
FN, C, FH, FW = filters.shape
ny = int(np.ceil(FN / nx))
fig = plt.figure()
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(FN):
ax = fig.add_subplot(ny, nx, i+1, xticks=[], yticks=[])
ax.imshow(filters[i, 0], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
network = SimpleConvNet()
# 무작위(랜덤) 초기화 후의 가중치
filter_show(network.params['W1'])
# 학습된 가중치
network.load_params("params.pkl")
filter_show(network.params['W1'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This small script shows some important aspects of Python syntax.
Step2: Parentheses are also used to pass parameters to a function when it is called. In the following code snippet, the print() function is used to display, for example, the contents of a variable. The function is "called" with a pair of parentheses containing the function's arguments.
Step3: Some functions are called without arguments and act on the object they evaluate. The parentheses must still be used even when the function takes no arguments.
|
<ASSISTANT_TASK:>
Python Code:
# set the midpoint
midpoint = 5
# make two empty lists
lower = []; upper = []
# split the numbers into lower and upper
for i in range(10):
if (i < midpoint):
lower.append(i)
else:
upper.append(i)
print("lower:", lower)
print("upper:", upper)
print(2*(3+4))
print(2*3+4)
print((2*3)+4)
x = 3
print('first value:', x)
print('second value:', 2)
L = [4,2,3,1]
L.sort()
print(L)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Quick Start
Step2: Suppose one has a calibrated spectrum and wants to compute the Vega magnitude through the HST WFC3 F110W passband,
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pylab as plt
import numpy as np
import sys
sys.path.append('../')
from pyphot import sandbox as pyphot
from pyphot.svo import get_pyphot_filter as get_filter_from_svo
lst = ["2MASS/2MASS.J", "2MASS/2MASS.H", "2MASS/2MASS.Ks",
"HST/ACS_WFC.F475W", "HST/ACS_WFC.F814W"]
filters = [get_filter_from_svo(k) for k in lst]
filters
# inspect one of the filters: show its information and zero points
filters[0].info(show_zeropoints=True)
# convert to magnitudes
import numpy as np
# We'll use Vega spectrum as example
from pyphot import Vega
vega = Vega()
f = filters[-1]
# compute the integrated flux through the filter f
# note that it works on many spectra at once
fluxes = f.get_flux(vega.wavelength, vega.flux, axis=-1)
# Note that fluxes is now with units of erg/s/cm2/AA
# pyphot gives Vega in flam and can convert between flux density units.
fluxes, vega.wavelength, vega.flux
# convert to vega magnitudes
mags = -2.5 * np.log10(fluxes.value) - f.Vega_zero_mag
print("Vega magnitude of Vega in {0:s} is : {1:f} mag".format(f.name, mags))
mags = -2.5 * np.log10(fluxes.value) - f.AB_zero_mag
print("AB magnitude of Vega in {0:s} is : {1:f} mag".format(f.name, mags))
mags = -2.5 * np.log10(fluxes.value) - f.ST_zero_mag
print("ST magnitude of Vega in {0:s} is : {1:f} mag".format(f.name, mags))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Tip label styling
Step2: tip_labels_align
Step3: tip_labels_colors
Step4: tip_labels_style
Step5: Node labels styling
Step6: node_labels_style
Step7: Node styling
Step8: node_colors
Step9: node_markers
Step10: Rectangular markers can be drawn in many dimensions. Designate "r2x1" for a box that is twice as wide as it is tall.
Step11: node_style
Step12: node_hover
Step13: Layout
Step14: todo
Step15: Styles
Step16: Scalebar
|
<ASSISTANT_TASK:>
Python Code:
import toytree
import toyplot
import numpy as np
# a tree to use for examples
url = "https://eaton-lab.org/data/Cyathophora.tre"
rtre = toytree.tree(url).root(wildcard='prz')
# hide tip labels
rtre.draw(tip_labels=False);
# get tip labels from tree
tipnames = rtre.get_tip_labels()
# modify list so that html italic tags surround text
italicnames = ["<i>{}</i>".format(i) for i in tipnames]
# enter the list of names to tip_labels
rtre.draw(tip_labels=italicnames);
rtre.draw(tip_labels_align=True);
# use color from favored toytree color scheme
rtre.draw(
tip_labels_align=True,
tip_labels_colors=toytree.colors[1],
);
# enter a list of colors by name
rtre.draw(
tip_labels_align=True,
tip_labels_colors=(['goldenrod'] * 11) + (["mediumseagreen"] * 2),
);
# make list of hex color values based on tip labels
colorlist = ["#d6557c" if "rex" in tip else "#5384a3" for tip in rtre.get_tip_labels()]
rtre.draw(
tip_labels_align=True,
tip_labels_colors=colorlist
);
rtre.draw(
tip_labels_style={
"fill": "#262626",
"font-size": "11px",
"-toyplot-anchor-shift": "15px",
}
);
# shows node idx labels on all nodes
rtre.draw(node_labels=True);
# suppreses nodes
rtre.draw(node_labels=False);
# suppresses node labels, sizes ensures nodes are still shown
rtre.draw(node_labels=False, node_sizes=10);
# shortcut for 'default' features always present in TreeNodes, suppresses tip nodes.
rtre.draw(node_labels="support");
# build a list of values in the correct node plot order
sups = rtre.get_node_values("support", show_root=True, show_tips=True)
rtre.draw(node_labels=sups);
rtre.draw(
node_labels='idx',
node_labels_style={
"fill": "#262626",
"font-size": "8px",
}
);
rtre.draw(
node_labels=False,
node_sizes=10,
);
# draw random values to use for node sizes
np.random.seed(1234)
sizes = np.random.uniform(5, 15, rtre.nnodes)
rtre.draw(
node_labels=False,
node_sizes=sizes,
);
# set a single color for all nodes
rtre.draw(
node_labels=False,
node_sizes=10,
node_colors=toytree.colors[1],
);
rtre.get_node_values("support", 1, 0)
# get list of sizes and colors in node plot order with tip nodes suppressed
sizes = [10 if i else 0 for i in rtre.get_node_values('support', 1, 0)]
colors = ['black' if i=='100' else 'red' for i in rtre.get_node_values('support', 1, 0)]
# enter a lists of values
rtre.draw(
node_labels=None,
node_sizes=sizes,
node_colors=colors,
);
# enter a lists of values
rtre.draw(
node_labels="support",
node_markers="o"
);
# enter a lists of values
rtre.draw(
height=350,
node_labels=None,
node_sizes=[0 if i else 8 for i in rtre.get_node_values(None, 1, 0)],
node_markers="s",
node_colors=toytree.colors[1],
);
# rectangles for nodes
rtre.draw(
width=600,
height=400,
node_labels="support",
node_labels_style={"font-size": "11px"},
node_markers="r2x1.25",
node_sizes=12,
);
# the classic "ape" style tree plot look
rtre.draw(
width=600,
height=400,
node_labels="support",
node_labels_style={"font-size": "10px"},
node_markers="r2x1.25",
node_sizes=12,
node_style={
"fill": "lightgrey",
"stroke": "black",
"stroke-width": 0.75,
}
);
rtre.draw(node_hover=True, node_labels=True, node_sizes=15);
ttre = toytree.rtree.coaltree(20, seed=123)
ttre.draw(
layout='d',
tip_labels_align=True,
node_sizes=[8 if i else 0 for i in ttre.get_node_values()],
node_style={"stroke": "black"},
width=350,
height=300,
);
ttre = toytree.rtree.unittree(40, seed=123)
ttre.draw(
layout='c',
edge_type='c',
node_sizes=[8 if i else 0 for i in ttre.get_node_values()],
node_style={"stroke": "black"},
width=400,
height=400,
);
ttre = toytree.rtree.unittree(40, seed=123)
ttre.draw(
layout='c',
edge_type='p',
width=400,
height=400,
);
rtre.draw(
tip_labels_align=True,
edge_align_style={
"stroke": "violet",
"stroke-width": 1.5,
"stroke-dasharray": "2,5" # size of dash, spacing of dashes
});
rtre.draw(tree_style='n');
# generate a random coalescent tree and draw in 'coalescent' style
randomtre = toytree.rtree.coaltree(ntips=10, seed=666)
randomtre.draw(tree_style='c');
rtre.draw(scalebar=True);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'hadgem3-gc31-hm', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Annotating bad spans of data
Step2: .. sidebar
Step3: Now we can confirm that the annotations are centered on the EOG events. Since
Step4: See the section tut-section-programmatic-annotations for more details
Step5: The values that are appropriate are dataset- and hardware-dependent, so some
Step6: Notice that we've passed reject_by_annotation=False above, in order to
Step7: More importantly, note that many more epochs are rejected (~20% instead of
Step8: Finally, it should be noted that "dropped" epochs are not necessarily deleted
Step9: Alternatively, if rejection thresholds were not originally given to the
|
<ASSISTANT_TASK:>
Python Code:
import os
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
events_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw-eve.fif')
events = mne.read_events(events_file)
fig = raw.plot()
fig.canvas.key_press_event('a')
eog_events = mne.preprocessing.find_eog_events(raw)
onsets = eog_events[:, 0] / raw.info['sfreq'] - 0.25
durations = [0.5] * len(eog_events)
descriptions = ['bad blink'] * len(eog_events)
blink_annot = mne.Annotations(onsets, durations, descriptions,
orig_time=raw.info['meas_date'])
raw.set_annotations(blink_annot)
eeg_picks = mne.pick_types(raw.info, meg=False, eeg=True)
raw.plot(events=eog_events, order=eeg_picks)
reject_criteria = dict(mag=3000e-15, # 3000 fT
grad=3000e-13, # 3000 fT/cm
eeg=100e-6, # 100 μV
eog=200e-6) # 200 μV
flat_criteria = dict(mag=1e-15, # 1 fT
grad=1e-13, # 1 fT/cm
eeg=1e-6) # 1 μV
epochs = mne.Epochs(raw, events, tmin=-0.2, tmax=0.5, reject_tmax=0,
reject=reject_criteria, flat=flat_criteria,
reject_by_annotation=False, preload=True)
epochs.plot_drop_log()
epochs = mne.Epochs(raw, events, tmin=-0.2, tmax=0.5, reject_tmax=0,
reject=reject_criteria, flat=flat_criteria, preload=True)
epochs.plot_drop_log()
print(epochs.drop_log)
epochs.drop_bad()
stronger_reject_criteria = dict(mag=2000e-15, # 2000 fT
grad=2000e-13, # 2000 fT/cm
eeg=100e-6, # 100 μV
eog=100e-6) # 100 μV
epochs.drop_bad(reject=stronger_reject_criteria)
print(epochs.drop_log)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load CML example data
Step2: Do a simple standard processing to get rain rates for each CML
Step3: Do IDW interpolation of CML rain rates
Step4: Perform interpolation for all time steps
Step5: Calculate CML coverage mask
Step6: Coverage mask for a 0.1 degree radius around the CMLs.
Step7: Plot CML rainfall sum and apply coverage map
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pycomlink as pycml
import matplotlib.pyplot as plt
from tqdm import tqdm
cml_list = pycml.io.examples.get_75_cmls()
fig, ax = plt.subplots()
for cml in cml_list:
cml.plot_line(ax=ax, color='k')
for cml in tqdm(cml_list):
window_length = 60
threshold = 1.0
cml.process.wet_dry.std_dev(window_length=window_length, threshold=threshold)
cml.process.baseline.linear()
cml.process.baseline.calc_A()
cml.process.A_R.calc_R()
cml_interp = pycml.spatial.interpolator.ComlinkGridInterpolator(
cml_list=cml_list,
resolution=0.01,
interpolator=pycml.spatial.interpolator.IdwKdtreeInterpolator())
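# Background note (added, not from the original example): inverse distance
# weighting (IDW) estimates each grid point as a weighted average of nearby CML
# rain rates, with weights proportional to 1/distance**p for some power p, so
# closer links contribute more. The exact neighbour search and default power
# used by IdwKdtreeInterpolator are not shown here.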
ds = cml_interp.loop_over_time()
ds
fig, ax = plt.subplots(3, 3, sharex=True, sharey=True, figsize=(12,12))
for i, axi in enumerate(ax.flat):
for cml in cml_list:
cml.plot_line(ax=axi, color='k')
pc = axi.pcolormesh(ds.lon,
ds.lat,
ds.R.isel(time=20+i),
cmap=plt.get_cmap('BuPu', 8),
vmin=0,
vmax=20)
axi.set_title(cml_interp.df_cmls.index[20+i])
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.95, 0.15, 0.02, 0.7])
fig.colorbar(pc, cax=cbar_ax, label='Hourly rainfall sum in mm');
cml_coverage_mask = pycml.spatial.coverage.calc_coverage_mask(
cml_list=cml_list,
xgrid=ds.lon.values,
ygrid=ds.lat.values,
max_dist_from_cml=0.05)
fig, ax = plt.subplots()
for cml in cml_list:
cml.plot_line(ax=ax, color='k')
ax.pcolormesh(ds.lon, ds.lat, cml_coverage_mask, cmap='gray');
cml_coverage_mask = pycml.spatial.coverage.calc_coverage_mask(
cml_list=cml_list,
xgrid=ds.lon.values,
ygrid=ds.lat.values,
max_dist_from_cml=0.1)
fig, ax = plt.subplots()
for cml in cml_list:
cml.plot_line(ax=ax, color='k')
ax.pcolormesh(ds.lon, ds.lat, cml_coverage_mask, cmap='gray');
fig, ax = plt.subplots()
for cml in cml_list:
cml.plot_line(ax=ax, color='k')
pc = ax.pcolormesh(
ds.lon,
ds.lat,
ds.R.sum(dim='time').where(cml_coverage_mask),
cmap=plt.get_cmap('BuPu', 32))
plt.colorbar(pc, label='rainfall sum in mm');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Pandas provides a number of read_* options, including read_csv, which we will use here.
Step2: One of the first things we can do is examine the columns that the dataframe has identified.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv("data-readonly/IL_Building_Inventory.csv")
df.columns
df.head()
df.tail()
df.describe()
df.dtypes
df.groupby(["Agency Name"])["Square Footage"].sum()
df["Agency Name"].value_counts()
df.describe()
df["Total Floors"].median()
df.median()
df.quantile([0.1, 0.2, 0.9])
df["Agency Name"].apply(lambda a: a.upper()).head()
df["Agency Name"].apply(lambda a: a).head()
"This is my string".lower()
"this is my string. here is another.".capitalize()
df = pd.read_csv("data-readonly/IL_Building_Inventory.csv", na_values={'Year Acquired': 0, 'Year Constructed': 0})
df.count()
df.iloc[10]
df.iloc[10]
df.loc[10, ["County", "Senate Dist"]]
year = df.groupby("Year Acquired")
df.index = df["Year Acquired"]
df.head()
df.loc[1970].head()
df.head()
df.loc[1974]
df.loc[0]
df = pd.read_csv("data-readonly/IL_Building_Inventory.csv",
na_values={'Year Acquired': 0, 'Year Constructed': 0})
df.index
df2 = df.set_index("Year Acquired")
df2.index
df2.loc[1975].head()
df2.iloc[[1974, 1975]]
keith = df.set_index("City")
keith.loc["Kinmundy"].describe()
names = ["date", "city", "state", "country", "shape", "duration_seconds", "duration_reported", "description", "report_date", "latitude", "longitude"]
ufo = pd.read_csv("data-readonly/ufo-scrubbed-geocoded-time-standardized.csv",
names = names, parse_dates = ["date", "report_date"])
ufo.dtypes
ufo.describe()
sum_seconds = ufo.groupby("state")["duration_seconds"].sum()
sum_seconds.sort_values() / (365*24*3600)
states = ufo.groupby("state")
states["duration_seconds"].mean()
ufo.loc[ ufo["duration_seconds"] > 900 , ["state", "duration_seconds", "shape"] ].groupby("state")["duration_seconds"].sum()
ufo["date"].min()
ufo["date"].max()
first_sighting = ufo.groupby("state")["date"].min()
last_sighting = ufo.groupby("state")["date"].max()
last_sighting - first_sighting
first_sighting.index
ufo["state"].nunique()
ufo["country"].unique()
ufo["country"] = ufo["country"].astype("category")
ufo["shape"] = ufo["shape"].astype("category")
ufo["state"] = ufo["state"].astype("category")
ufo["city"].nunique()
ufo.shape
ufo.groupby("city").count().nlargest(10, "date")
ufo.dtypes
shape_times = ufo.groupby("shape")["duration_seconds"].sum()
shape_times.index
shape_times.plot()
shape_times.sort_values().plot()
shape_times.nlargest(5)
shape_state = ufo.groupby(["state", "shape"])
times = shape_state["duration_seconds"].sum()
times.loc[ ["il", "mi", "oh"], ["sphere", "unknown"] ]
times.loc["il":"ok", "sphere":"unknown"]
unsorted_nonsense = times.sort_index()
unsorted_nonsense.loc["il":"ok"]
unsorted_nonsense.loc["il":"ok"].plot()
ufo.set_index("date", inplace=True)
ufo.resample("A")["duration_seconds"].sum()
myplot = ufo.resample("10A")["duration_seconds"].sum().plot()
myplot.set_yscale('log')
r = ufo.resample("10A")
r["duration_seconds"].sum()
ufo.resample("W")["duration_seconds"].sum()
day_of_week = ufo.index.dayofweek
ufo["day_of_week"] = day_of_week
ufo.groupby("day_of_week")["duration_seconds"].sum().plot()
ufo.groupby("state").sum().loc["tx"]
ufo.reset_index()
week = ufo.set_index( ["day_of_week", "state", "shape"] )
# all days of the week for state 'il' and shape 'cigar'
# (the MultiIndex needs to be sorted before partial slicing works)
week.sort_index().loc[(slice(None), 'il', 'cigar'), :]
new_week = week.sort_index()
ufo = ufo.reset_index()
ufo.index = ufo.date
ufo.index.dayofweek
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: (1b) Sparse Vectors
Step2: (1c) OHE features as sparse vectors
Step4: (1d) OHE encoding function
Step5: (1e) Apply OHE to a dataset
Step6: Part 2
Step7: (2b) OHE dictionary of unique features
Step9: (2c) Automated creation of the OHE dictionary
Step10: Part 3
Step11: (3a) Loading and splitting the data
Step13: (3b) Feature extraction
Step14: (3c) Create the OHE dictionary for this dataset
Step16: (3d) Applying OHE to the dataset
Step19: Visualization 1
Step21: (3e) Unseen features
Step22: Part 4
Step24: (4b) Log loss
Step25: (4c) Baseline log loss
Step27: (4d) Predicted probability
Step29: (4e) Evaluate the model
Step30: (4f) Validation log loss
Step31: Visualization 2
Step33: Part 5
Step35: (5b) Creating hashed features
Step37: (5c) Sparsity
Step38: (5d) Logistic model with hashed features
Step39: (5e) Evaluating the test set
|
<ASSISTANT_TASK:>
Python Code:
# Data for manual OHE
# Note: the first data point does not include any value for the optional third feature
sampleOne = [(0, 'mouse'), (1, 'black')]
sampleTwo = [(0, 'cat'), (1, 'tabby'), (2, 'mouse')]
sampleThree = [(0, 'bear'), (1, 'black'), (2, 'salmon')]
sampleDataRDD = sc.parallelize([sampleOne, sampleTwo, sampleThree])
# EXERCICIO
sampleOHEDictManual = {}
<COMPLETAR>
# TEST One-hot-encoding (1a)
from test_helper import Test
Test.assertEqualsHashed(sampleOHEDictManual[(0,'bear')],
'b6589fc6ab0dc82cf12099d1c2d40ab994e8410c',
"incorrect value for sampleOHEDictManual[(0,'bear')]")
Test.assertEqualsHashed(sampleOHEDictManual[(0,'cat')],
'356a192b7913b04c54574d18c28d46e6395428ab',
"incorrect value for sampleOHEDictManual[(0,'cat')]")
Test.assertEqualsHashed(sampleOHEDictManual[(0,'mouse')],
'da4b9237bacccdf19c0760cab7aec4a8359010b0',
"incorrect value for sampleOHEDictManual[(0,'mouse')]")
Test.assertEqualsHashed(sampleOHEDictManual[(1,'black')],
'77de68daecd823babbb58edb1c8e14d7106e83bb',
"incorrect value for sampleOHEDictManual[(1,'black')]")
Test.assertEqualsHashed(sampleOHEDictManual[(1,'tabby')],
'1b6453892473a467d07372d45eb05abc2031647a',
"incorrect value for sampleOHEDictManual[(1,'tabby')]")
Test.assertEqualsHashed(sampleOHEDictManual[(2,'mouse')],
'ac3478d69a3c81fa62e60f5c3696165a4e5e6ac4',
"incorrect value for sampleOHEDictManual[(2,'mouse')]")
Test.assertEqualsHashed(sampleOHEDictManual[(2,'salmon')],
'c1dfd96eea8cc2b62785275bca38ac261256e278',
"incorrect value for sampleOHEDictManual[(2,'salmon')]")
Test.assertEquals(len(sampleOHEDictManual.keys()), 7,
'incorrect number of keys in sampleOHEDictManual')
import numpy as np
from pyspark.mllib.linalg import SparseVector
# EXERCICIO
aDense = np.array([0., 3., 0., 4.])
aSparse = SparseVector(<COMPLETAR>)
bDense = np.array([0., 0., 0., 1.])
bSparse = SparseVector(<COMPLETAR>)
w = np.array([0.4, 3.1, -1.4, -.5])
print aDense.dot(w)
print aSparse.dot(w)
print bDense.dot(w)
print bSparse.dot(w)
# TEST Sparse Vectors (1b)
Test.assertTrue(isinstance(aSparse, SparseVector), 'aSparse needs to be an instance of SparseVector')
Test.assertTrue(isinstance(bSparse, SparseVector), 'aSparse needs to be an instance of SparseVector')
Test.assertTrue(aDense.dot(w) == aSparse.dot(w),
'dot product of aDense and w should equal dot product of aSparse and w')
Test.assertTrue(bDense.dot(w) == bSparse.dot(w),
'dot product of bDense and w should equal dot product of bSparse and w')
# Reminder of the sample features
# sampleOne = [(0, 'mouse'), (1, 'black')]
# sampleTwo = [(0, 'cat'), (1, 'tabby'), (2, 'mouse')]
# sampleThree = [(0, 'bear'), (1, 'black'), (2, 'salmon')]
# EXERCICIO
sampleOneOHEFeatManual = SparseVector(<COMPLETAR>)
sampleTwoOHEFeatManual = SparseVector(<COMPLETAR>)
sampleThreeOHEFeatManual = SparseVector(<COMPLETAR>)
# TEST OHE Features as sparse vectors (1c)
Test.assertTrue(isinstance(sampleOneOHEFeatManual, SparseVector),
'sampleOneOHEFeatManual needs to be a SparseVector')
Test.assertTrue(isinstance(sampleTwoOHEFeatManual, SparseVector),
'sampleTwoOHEFeatManual needs to be a SparseVector')
Test.assertTrue(isinstance(sampleThreeOHEFeatManual, SparseVector),
'sampleThreeOHEFeatManual needs to be a SparseVector')
Test.assertEqualsHashed(sampleOneOHEFeatManual,
'ecc00223d141b7bd0913d52377cee2cf5783abd6',
'incorrect value for sampleOneOHEFeatManual')
Test.assertEqualsHashed(sampleTwoOHEFeatManual,
'26b023f4109e3b8ab32241938e2e9b9e9d62720a',
'incorrect value for sampleTwoOHEFeatManual')
Test.assertEqualsHashed(sampleThreeOHEFeatManual,
'c04134fd603ae115395b29dcabe9d0c66fbdc8a7',
'incorrect value for sampleThreeOHEFeatManual')
# EXERCICIO
def oneHotEncoding(rawFeats, OHEDict, numOHEFeats):
Produce a one-hot-encoding from a list of features and an OHE dictionary.
Note:
You should ensure that the indices used to create a SparseVector are sorted.
Args:
rawFeats (list of (int, str)): The features corresponding to a single observation. Each
feature consists of a tuple of featureID and the feature's value. (e.g. sampleOne)
OHEDict (dict): A mapping of (featureID, value) to unique integer.
numOHEFeats (int): The total number of unique OHE features (combinations of featureID and
value).
Returns:
SparseVector: A SparseVector of length numOHEFeats with indicies equal to the unique
identifiers for the (featureID, value) combinations that occur in the observation and
with values equal to 1.0.
return <COMPLETAR>
# Calculate the number of features in sampleOHEDictManual
numSampleOHEFeats = len(sampleOHEDictManual)
# Run oneHotEnoding on sampleOne
sampleOneOHEFeat = oneHotEncoding(sampleOne, sampleOHEDictManual, numSampleOHEFeats)
print sampleOneOHEFeat
# TEST Define an OHE Function (1d)
Test.assertTrue(sampleOneOHEFeat == sampleOneOHEFeatManual,
'sampleOneOHEFeat should equal sampleOneOHEFeatManual')
Test.assertEquals(sampleOneOHEFeat, SparseVector(7, [2,3], [1.0,1.0]),
'incorrect value for sampleOneOHEFeat')
Test.assertEquals(oneHotEncoding([(1, 'black'), (0, 'mouse')], sampleOHEDictManual,
numSampleOHEFeats), SparseVector(7, [2,3], [1.0,1.0]),
'incorrect definition for oneHotEncoding')
# EXERCICIO
sampleOHEData = sampleDataRDD.<COMPLETAR>
print sampleOHEData.collect()
# TEST Apply OHE to a dataset (1e)
sampleOHEDataValues = sampleOHEData.collect()
Test.assertTrue(len(sampleOHEDataValues) == 3, 'sampleOHEData should have three elements')
Test.assertEquals(sampleOHEDataValues[0], SparseVector(7, {2: 1.0, 3: 1.0}),
'incorrect OHE for first sample')
Test.assertEquals(sampleOHEDataValues[1], SparseVector(7, {1: 1.0, 4: 1.0, 5: 1.0}),
'incorrect OHE for second sample')
Test.assertEquals(sampleOHEDataValues[2], SparseVector(7, {0: 1.0, 3: 1.0, 6: 1.0}),
'incorrect OHE for third sample')
# EXERCICIO
sampleDistinctFeats = (sampleDataRDD
.<COMPLETAR>
.<COMPLETAR>
)
# TEST Pair RDD of (featureID, category) (2a)
Test.assertEquals(sorted(sampleDistinctFeats.collect()),
[(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'),
(1, 'tabby'), (2, 'mouse'), (2, 'salmon')],
'incorrect value for sampleDistinctFeats')
# EXERCICIO
sampleOHEDict = (sampleDistinctFeats
.<COMPLETAR>
.<COMPLETAR>)
print sampleOHEDict
# TEST OHE Dictionary from distinct features (2b)
Test.assertEquals(sorted(sampleOHEDict.keys()),
[(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'),
(1, 'tabby'), (2, 'mouse'), (2, 'salmon')],
'sampleOHEDict has unexpected keys')
Test.assertEquals(sorted(sampleOHEDict.values()), range(7), 'sampleOHEDict has unexpected values')
# EXERCICIO
def createOneHotDict(inputData):
Creates a one-hot-encoder dictionary based on the input data.
Args:
inputData (RDD of lists of (int, str)): An RDD of observations where each observation is
made up of a list of (featureID, value) tuples.
Returns:
dict: A dictionary where the keys are (featureID, value) tuples and map to values that are
unique integers.
return (inputData
.<COMPLETAR>
.<COMPLETAR>
.<COMPLETAR>
.<COMPLETAR>
)
sampleOHEDictAuto = createOneHotDict(sampleDataRDD)
print sampleOHEDictAuto
# TEST Automated creation of an OHE dictionary (2c)
Test.assertEquals(sorted(sampleOHEDictAuto.keys()),
[(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'),
(1, 'tabby'), (2, 'mouse'), (2, 'salmon')],
'sampleOHEDictAuto has unexpected keys')
Test.assertEquals(sorted(sampleOHEDictAuto.values()), range(7),
'sampleOHEDictAuto has unexpected values')
import os.path
baseDir = os.path.join('Data')
inputPath = os.path.join('Aula04', 'dac_sample.txt')
fileName = os.path.join(baseDir, inputPath)
if os.path.isfile(fileName):
rawData = (sc
.textFile(fileName, 2)
.map(lambda x: x.replace('\t', ','))) # work with either ',' or '\t' separated data
print rawData.take(1)
# EXERCICIO
weights = [.8, .1, .1]
seed = 42
# Use randomSplit with weights and seed
rawTrainData, rawValidationData, rawTestData = rawData.randomSplit(weights, seed)
# Cache the data
rawTrainData.<COMPLETAR>
rawValidationData.<COMPLETAR>
rawTestData.<COMPLETAR>
nTrain = rawTrainData.count()
nVal = rawValidationData.count()
nTest = rawTestData.count()
print nTrain, nVal, nTest, nTrain + nVal + nTest
print rawData.take(1)
# TEST Loading and splitting the data (3a)
Test.assertTrue(all([rawTrainData.is_cached, rawValidationData.is_cached, rawTestData.is_cached]),
'you must cache the split data')
Test.assertEquals(nTrain, 79911, 'incorrect value for nTrain')
Test.assertEquals(nVal, 10075, 'incorrect value for nVal')
Test.assertEquals(nTest, 10014, 'incorrect value for nTest')
# EXERCICIO
def parsePoint(point):
Converts a comma separated string into a list of (featureID, value) tuples.
Note:
featureIDs should start at 0 and increase to the number of features - 1.
Args:
point (str): A comma separated string where the first value is the label and the rest
are features.
Returns:
list: A list of (featureID, value) tuples.
<COMPLETAR>
parsedTrainFeat = rawTrainData.map(parsePoint)
numCategories = (parsedTrainFeat
.<COMPLETAR>
.<COMPLETAR>
.<COMPLETAR>
.<COMPLETAR>
.<COMPLETAR>
.collect()
)
print numCategories[2][1]
# TEST Extract features (3b)
Test.assertEquals(numCategories[2][1], 855, 'incorrect implementation of parsePoint')
Test.assertEquals(numCategories[32][1], 4, 'incorrect implementation of parsePoint')
# EXERCICIO
ctrOHEDict = <COMPLETAR>
numCtrOHEFeats = len(ctrOHEDict.keys())
print numCtrOHEFeats
print ctrOHEDict[(0, '')]
# TEST Create an OHE dictionary from the dataset (3c)
Test.assertEquals(numCtrOHEFeats, 233286, 'incorrect number of features in ctrOHEDict')
Test.assertTrue((0, '') in ctrOHEDict, 'incorrect features in ctrOHEDict')
from pyspark.mllib.regression import LabeledPoint
# EXERCICIO
def parseOHEPoint(point, OHEDict, numOHEFeats):
Obtain the label and feature vector for this raw observation.
Note:
You must use the function `oneHotEncoding` in this implementation or later portions
of this lab may not function as expected.
Args:
point (str): A comma separated string where the first value is the label and the rest
are features.
OHEDict (dict of (int, str) to int): Mapping of (featureID, value) to unique integer.
numOHEFeats (int): The number of unique features in the training dataset.
Returns:
LabeledPoint: Contains the label for the observation and the one-hot-encoding of the
raw features based on the provided OHE dictionary.
<COMPLETAR>
OHETrainData = rawTrainData.map(lambda point: parseOHEPoint(point, ctrOHEDict, numCtrOHEFeats))
OHETrainData.cache()
print OHETrainData.take(1)
# Check that oneHotEncoding function was used in parseOHEPoint
backupOneHot = oneHotEncoding
oneHotEncoding = None
withOneHot = False
try: parseOHEPoint(rawTrainData.take(1)[0], ctrOHEDict, numCtrOHEFeats)
except TypeError: withOneHot = True
oneHotEncoding = backupOneHot
# TEST Apply OHE to the dataset (3d)
numNZ = sum(parsedTrainFeat.map(lambda x: len(x)).take(5))
numNZAlt = sum(OHETrainData.map(lambda lp: len(lp.features.indices)).take(5))
Test.assertEquals(numNZ, numNZAlt, 'incorrect implementation of parseOHEPoint')
Test.assertTrue(withOneHot, 'oneHotEncoding not present in parseOHEPoint')
def bucketFeatByCount(featCount):
Bucket the counts by powers of two.
for i in range(11):
size = 2 ** i
if featCount <= size:
return size
return -1
featCounts = (OHETrainData
.flatMap(lambda lp: lp.features.indices)
.map(lambda x: (x, 1))
.reduceByKey(lambda x, y: x + y))
featCountsBuckets = (featCounts
.map(lambda x: (bucketFeatByCount(x[1]), 1))
.filter(lambda (k, v): k != -1)
.reduceByKey(lambda x, y: x + y)
.collect())
print featCountsBuckets
import matplotlib.pyplot as plt
x, y = zip(*featCountsBuckets)
x, y = np.log(x), np.log(y)
def preparePlot(xticks, yticks, figsize=(10.5, 6), hideLabels=False, gridColor='#999999',
gridWidth=1.0):
Template for generating the plot layout.
plt.close()
fig, ax = plt.subplots(figsize=figsize, facecolor='white', edgecolor='white')
ax.axes.tick_params(labelcolor='#999999', labelsize='10')
for axis, ticks in [(ax.get_xaxis(), xticks), (ax.get_yaxis(), yticks)]:
axis.set_ticks_position('none')
axis.set_ticks(ticks)
axis.label.set_color('#999999')
if hideLabels: axis.set_ticklabels([])
plt.grid(color=gridColor, linewidth=gridWidth, linestyle='-')
map(lambda position: ax.spines[position].set_visible(False), ['bottom', 'top', 'left', 'right'])
return fig, ax
# generate layout and plot data
fig, ax = preparePlot(np.arange(0, 10, 1), np.arange(4, 14, 2))
ax.set_xlabel(r'$\log_e(bucketSize)$'), ax.set_ylabel(r'$\log_e(countInBucket)$')
plt.scatter(x, y, s=14**2, c='#d6ebf2', edgecolors='#8cbfd0', alpha=0.75)
pass
# EXERCICIO
def oneHotEncoding(rawFeats, OHEDict, numOHEFeats):
Produce a one-hot-encoding from a list of features and an OHE dictionary.
Note:
If a (featureID, value) tuple doesn't have a corresponding key in OHEDict it should be
ignored.
Args:
rawFeats (list of (int, str)): The features corresponding to a single observation. Each
feature consists of a tuple of featureID and the feature's value. (e.g. sampleOne)
OHEDict (dict): A mapping of (featureID, value) to unique integer.
numOHEFeats (int): The total number of unique OHE features (combinations of featureID and
value).
Returns:
SparseVector: A SparseVector of length numOHEFeats with indicies equal to the unique
identifiers for the (featureID, value) combinations that occur in the observation and
with values equal to 1.0.
<COMPLETAR>
OHEValidationData = rawValidationData.map(lambda point: parseOHEPoint(point, ctrOHEDict, numCtrOHEFeats))
OHEValidationData.cache()
print OHEValidationData.take(1)
# TEST Handling unseen features (3e)
numNZVal = (OHEValidationData
.map(lambda lp: len(lp.features.indices))
.sum())
Test.assertEquals(numNZVal, 372080, 'incorrect number of features')
from pyspark.mllib.classification import LogisticRegressionWithSGD
# fixed hyperparameters
numIters = 50
stepSize = 10.
regParam = 1e-6
regType = 'l2'
includeIntercept = True
# EXERCICIO
model0 = <COMPLETAR>
sortedWeights = sorted(model0.weights)
print sortedWeights[:5], model0.intercept
# TEST Logistic regression (4a)
Test.assertTrue(np.allclose(model0.intercept, 0.56455084025), 'incorrect value for model0.intercept')
Test.assertTrue(np.allclose(sortedWeights[0:5],
[-0.45899236853575609, -0.37973707648623956, -0.36996558266753304,
-0.36934962879928263, -0.32697945415010637]), 'incorrect value for model0.weights')
# EXERCICIO
from math import log
def computeLogLoss(p, y):
Calculates the value of log loss for a given probabilty and label.
Note:
log(0) is undefined, so when p is 0 we need to add a small value (epsilon) to it
and when p is 1 we need to subtract a small value (epsilon) from it.
Args:
p (float): A probabilty between 0 and 1.
y (int): A label. Takes on the values 0 and 1.
Returns:
float: The log loss value.
<COMPLETAR>
print computeLogLoss(.5, 1)
print computeLogLoss(.5, 0)
print computeLogLoss(.99, 1)
print computeLogLoss(.99, 0)
print computeLogLoss(.01, 1)
print computeLogLoss(.01, 0)
print computeLogLoss(0, 1)
print computeLogLoss(1, 1)
print computeLogLoss(1, 0)
# TEST Log loss (4b)
Test.assertTrue(np.allclose([computeLogLoss(.5, 1), computeLogLoss(.01, 0), computeLogLoss(.01, 1)],
[0.69314718056, 0.0100503358535, 4.60517018599]),
'computeLogLoss is not correct')
Test.assertTrue(np.allclose([computeLogLoss(0, 1), computeLogLoss(1, 1), computeLogLoss(1, 0)],
[25.3284360229, 1.00000008275e-11, 25.3284360229]),
'computeLogLoss needs to bound p away from 0 and 1 by epsilon')
# EXERCICIO
# Note that our dataset has a very high click-through rate by design
# In practice click-through rate can be one to two orders of magnitude lower
classOneFracTrain = OHETrainData.<COMPLETAR>
print classOneFracTrain
logLossTrBase = OHETrainData.<COMPLETAR>
print 'Baseline Train Logloss = {0:.3f}\n'.format(logLossTrBase)
# TEST Baseline log loss (4c)
Test.assertTrue(np.allclose(classOneFracTrain, 0.22717773523), 'incorrect value for classOneFracTrain')
Test.assertTrue(np.allclose(logLossTrBase, 0.535844), 'incorrect value for logLossTrBase')
# EXERCICIO
from math import exp # exp(-t) = e^-t
def getP(x, w, intercept):
Calculate the probability for an observation given a set of weights and intercept.
Note:
We'll bound our raw prediction between 20 and -20 for numerical purposes.
Args:
x (SparseVector): A vector with values of 1.0 for features that exist in this
observation and 0.0 otherwise.
w (DenseVector): A vector of weights (betas) for the model.
intercept (float): The model's intercept.
Returns:
float: A probability between 0 and 1.
# calculate rawPrediction = w.x + intercept
rawPrediction = <COMPLETAR>
# Bound the raw prediction value
rawPrediction = min(rawPrediction, 20)
rawPrediction = max(rawPrediction, -20)
# calculate (1+e^-rawPrediction)^-1
return <COMPLETAR>
trainingPredictions = OHETrainData.<COMPLETAR>
print trainingPredictions.take(5)
# TEST Predicted probability (4d)
Test.assertTrue(np.allclose(trainingPredictions.sum(), 18135.4834348),
'incorrect value for trainingPredictions')
# EXERCICIO
def evaluateResults(model, data):
Calculates the log loss for the data given the model.
Args:
model (LogisticRegressionModel): A trained logistic regression model.
data (RDD of LabeledPoint): Labels and features for each observation.
Returns:
float: Log loss for the data.
return (data
.<COMPLETAR>
.<COMPLETAR>
.<COMPLETAR>
)
logLossTrLR0 = evaluateResults(model0, OHETrainData)
print ('OHE Features Train Logloss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.3f}'
.format(logLossTrBase, logLossTrLR0))
# TEST Evaluate the model (4e)
Test.assertTrue(np.allclose(logLossTrLR0, 0.456903), 'incorrect value for logLossTrLR0')
# EXERCICIO
logLossValBase = OHEValidationData.<COMPLETAR>
logLossValLR0 = evaluateResults(model0, OHEValidationData)
print ('OHE Features Validation Logloss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.3f}'
.format(logLossValBase, logLossValLR0))
# TEST Validation log loss (4f)
Test.assertTrue(np.allclose(logLossValBase, 0.527603), 'incorrect value for logLossValBase')
Test.assertTrue(np.allclose(logLossValLR0, 0.456957), 'incorrect value for logLossValLR0')
labelsAndScores = OHEValidationData.map(lambda lp:
(lp.label, getP(lp.features, model0.weights, model0.intercept)))
labelsAndWeights = labelsAndScores.collect()
labelsAndWeights.sort(key=lambda (k, v): v, reverse=True)
labelsByWeight = np.array([k for (k, v) in labelsAndWeights])
length = labelsByWeight.size
truePositives = labelsByWeight.cumsum()
numPositive = truePositives[-1]
falsePositives = np.arange(1.0, length + 1, 1.) - truePositives
truePositiveRate = truePositives / numPositive
falsePositiveRate = falsePositives / (length - numPositive)
# Generate layout and plot data
fig, ax = preparePlot(np.arange(0., 1.1, 0.1), np.arange(0., 1.1, 0.1))
ax.set_xlim(-.05, 1.05), ax.set_ylim(-.05, 1.05)
ax.set_ylabel('True Positive Rate (Sensitivity)')
ax.set_xlabel('False Positive Rate (1 - Specificity)')
plt.plot(falsePositiveRate, truePositiveRate, color='#8cbfd0', linestyle='-', linewidth=3.)
plt.plot((0., 1.), (0., 1.), linestyle='--', color='#d6ebf2', linewidth=2.) # Baseline model
pass
from collections import defaultdict
import hashlib
def hashFunction(numBuckets, rawFeats, printMapping=False):
Calculate a feature dictionary for an observation's features based on hashing.
Note:
Use printMapping=True for debug purposes and to better understand how the hashing works.
Args:
numBuckets (int): Number of buckets to use as features.
rawFeats (list of (int, str)): A list of features for an observation. Represented as
(featureID, value) tuples.
printMapping (bool, optional): If true, the mappings of featureString to index will be
printed.
Returns:
dict of int to float: The keys will be integers which represent the buckets that the
features have been hashed to. The value for a given key will contain the count of the
(featureID, value) tuples that have hashed to that key.
mapping = {}
for ind, category in rawFeats:
featureString = category + str(ind)
mapping[featureString] = int(int(hashlib.md5(featureString).hexdigest(), 16) % numBuckets)
if(printMapping): print mapping
sparseFeatures = defaultdict(float)
for bucket in mapping.values():
sparseFeatures[bucket] += 1.0
return dict(sparseFeatures)
# Reminder of the sample values:
# sampleOne = [(0, 'mouse'), (1, 'black')]
# sampleTwo = [(0, 'cat'), (1, 'tabby'), (2, 'mouse')]
# sampleThree = [(0, 'bear'), (1, 'black'), (2, 'salmon')]
# EXERCICIO
# Use four buckets
sampOneFourBuckets = <COMPLETAR>
sampTwoFourBuckets = <COMPLETAR>
sampThreeFourBuckets = <COMPLETAR>
# Use one hundred buckets
sampOneHundredBuckets = <COMPLETAR>
sampTwoHundredBuckets = <COMPLETAR>
sampThreeHundredBuckets = <COMPLETAR>
print '\t\t 4 Buckets \t\t\t 100 Buckets'
print 'SampleOne:\t {0}\t\t {1}'.format(sampOneFourBuckets, sampOneHundredBuckets)
print 'SampleTwo:\t {0}\t\t {1}'.format(sampTwoFourBuckets, sampTwoHundredBuckets)
print 'SampleThree:\t {0}\t {1}'.format(sampThreeFourBuckets, sampThreeHundredBuckets)
# TEST Hash function (5a)
Test.assertEquals(sampOneFourBuckets, {2: 1.0, 3: 1.0}, 'incorrect value for sampOneFourBuckets')
Test.assertEquals(sampThreeHundredBuckets, {72: 1.0, 5: 1.0, 14: 1.0},
'incorrect value for sampThreeHundredBuckets')
# EXERCICIO
def parseHashPoint(point, numBuckets):
Create a LabeledPoint for this observation using hashing.
Args:
point (str): A comma separated string where the first value is the label and the rest are
features.
numBuckets: The number of buckets to hash to.
Returns:
LabeledPoint: A LabeledPoint with a label (0.0 or 1.0) and a SparseVector of hashed
features.
<COMPLETAR>
numBucketsCTR = 2 ** 15
hashTrainData = rawTrainData.map(lambda x: parseHashPoint(x,numBucketsCTR))
hashTrainData.cache()
hashValidationData = rawValidationData.map(lambda x: parseHashPoint(x,numBucketsCTR))
hashValidationData.cache()
hashTestData = rawTestData.map(lambda x: parseHashPoint(x,numBucketsCTR))
hashTestData.cache()
print hashTrainData.take(1)
# TEST Creating hashed features (5b)
hashTrainDataFeatureSum = sum(hashTrainData
.map(lambda lp: len(lp.features.indices))
.take(20))
hashTrainDataLabelSum = sum(hashTrainData
.map(lambda lp: lp.label)
.take(100))
hashValidationDataFeatureSum = sum(hashValidationData
.map(lambda lp: len(lp.features.indices))
.take(20))
hashValidationDataLabelSum = sum(hashValidationData
.map(lambda lp: lp.label)
.take(100))
hashTestDataFeatureSum = sum(hashTestData
.map(lambda lp: len(lp.features.indices))
.take(20))
hashTestDataLabelSum = sum(hashTestData
.map(lambda lp: lp.label)
.take(100))
Test.assertEquals(hashTrainDataFeatureSum, 772, 'incorrect number of features in hashTrainData')
Test.assertEquals(hashTrainDataLabelSum, 24.0, 'incorrect labels in hashTrainData')
Test.assertEquals(hashValidationDataFeatureSum, 776,
'incorrect number of features in hashValidationData')
Test.assertEquals(hashValidationDataLabelSum, 16.0, 'incorrect labels in hashValidationData')
Test.assertEquals(hashTestDataFeatureSum, 774, 'incorrect number of features in hashTestData')
Test.assertEquals(hashTestDataLabelSum, 23.0, 'incorrect labels in hashTestData')
# EXERCICIO
def computeSparsity(data, d, n):
Calculates the average sparsity for the features in an RDD of LabeledPoints.
Args:
data (RDD of LabeledPoint): The LabeledPoints to use in the sparsity calculation.
d (int): The total number of features.
n (int): The number of observations in the RDD.
Returns:
float: The average of the ratio of features in a point to total features.
return (data
.<COMPLETAR>
.<COMPLETAR>
)/(d*n*1.)
averageSparsityHash = computeSparsity(hashTrainData, numBucketsCTR, nTrain)
averageSparsityOHE = computeSparsity(OHETrainData, numCtrOHEFeats, nTrain)
print 'Average OHE Sparsity: {0:.7e}'.format(averageSparsityOHE)
print 'Average Hash Sparsity: {0:.7e}'.format(averageSparsityHash)
# TEST Sparsity (5c)
Test.assertTrue(np.allclose(averageSparsityOHE, 1.6717677e-04),
'incorrect value for averageSparsityOHE')
Test.assertTrue(np.allclose(averageSparsityHash, 1.1805561e-03),
'incorrect value for averageSparsityHash')
numIters = 500
regType = 'l2'
includeIntercept = True
# Initialize variables using values from initial model training
bestModel = None
bestLogLoss = 1e10
# EXERCICIO
stepSizes = [1, 10]
regParams = [1e-6, 1e-3]
for stepSize in stepSizes:
for regParam in regParams:
model = (<COMPLETAR>)
logLossVa = <COMPLETAR>
print ('\tstepSize = {0:.1f}, regParam = {1:.0e}: logloss = {2:.3f}'
.format(stepSize, regParam, logLossVa))
if (logLossVa < bestLogLoss):
bestModel = model
bestLogLoss = logLossVa
print ('Hashed Features Validation Logloss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.3f}'
.format(logLossValBase, bestLogLoss))
# TEST Logistic model with hashed features (5d)
Test.assertTrue(np.allclose(bestLogLoss, 0.4481683608), 'incorrect value for bestLogLoss')
# EXERCICIO
# Log loss for the best model from (5d)
logLossValLR0 = <COMPLETAR>
logLossTest = <COMPLETAR>
# Log loss for the baseline model
logLossTestBaseline = hashTestData.map(lambda lp: computeLogLoss(classOneFracTrain,lp.label)).mean()
print ('Hashed Features Test Log Loss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.3f}'
.format(logLossTestBaseline, logLossTest))
# TEST Evaluate on the test set (5e)
Test.assertTrue(np.allclose(logLossTestBaseline, 0.537438),
'incorrect value for logLossTestBaseline')
Test.assertTrue(np.allclose(logLossTest, 0.455616931), 'incorrect value for logLossTest')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Something changed, or I forgot something, so I have to move the data into class folders.
Step2: 1. View Data
Step3: I am so happy that worked. I swear Python analytics programming with Jupyter is super rewarding compared to other programming.
Step4: checking I moved the validation set correctly
Step5: 2. Fully Connected Model
Step6: Understanding pytorch tensor reshaping
Step7: That means every layer of SimpleNet after the first is a $1 \times V$ vector, where $V$ is the 'Tensor volume' of the first layer (product of all dimensions).
Step8: The learning rate finder stops early when it reaches its standard cut-off loss.
Step9: 3. ConvNet
Step10: Aha, so a very simple ConvNet: just one pooling layer after the conv block. Interestingly, the Adaptive Max Pooling layer transforms our -1x80x3x3 feature map into a -1x80x1x1 vector.
Step11: 4. Refactored ConvNet
Step12: ni / nf: the number of input / output filters for each ConvLayer.
Step13: So it looks like the adaptive max pooling is folded into the forward pass rather than appearing as its own layer. It was defined more explicitly earlier via `self.pool = nn.AdaptiveMaxPool2d(1)`; the refactored net just calls `F.adaptive_max_pool2d(x, 1)` inside `forward`.
Step14: 5. Batch Normalization
Step15: Fast.ai DL1(2018) Lecture 7
Step16: Here a large conv layer is added to the start, with a large kernel size and stride 1. The idea is to give the network a richer input layer: we use bigger filters over bigger areas and output 10 feature maps from a 5x5 kernel. This is the current (2018) modern approach (sometimes even an 11x11 kernel & 32 output filters). $stride = 1$ together with $padding = \dfrac{\text{kernel size} - 1}{2}$ (here $\frac{5-1}{2} = 2$) means the output keeps the same spatial size as the input.
Step17: 6. Deep BatchNorm
Step18: Accuracy notes
Step19: That's it. Everything else will be identical, except that we can now stack way more layers than before.
Step20: The big difference between this and actual residual networks is that a real residual block usually wraps 2 convolutions inside the skip connection, instead of the single one here; the skip connection itself is the `x + super().forward(x)` in `ResnetLayer.forward` (a two-convolution version is sketched next to `ResnetLayer` in the code below).
Step21: 8. ResNet (bigger)
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%reload_ext autoreload
%autoreload 2
from fastai.conv_learner import *
PATH = 'data/cifar10/'
os.makedirs(PATH, exist_ok=True)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
stats = (np.array([ 0.4914 , 0.48216, 0.44653]), np.array([ 0.24703, 0.24349, 0.26159]))
## move dataset
for dataset in ['test/','train/']:
# get list of all classes
g = glob(PATH + dataset + '*.png')
g = [elem.split('_')[1].split('.png')[0] for elem in g]
g = np.unique(g)
# make class folders NOTE: I could've just uses `classes` from above
for cls in g:
os.mkdir(PATH + dataset + cls)
# for cls in g: # to reset
# os.rmdir(PATH + dataset + cls)
# move dataset to class folders
g = glob(PATH + dataset + '*.png')
for fpath in g:
cls = fpath.split('_')[-1].split('.png')[0]+'/'
fname = fpath.split(dataset)[-1]
os.rename(fpath, PATH+dataset+cls+fname)
# testing function mapping
f = lambda x: x + 2
a = [i for i in range(5)]
a = list(map(f, a))
a
def get_data(sz, bs):
tfms = tfms_from_stats(stats, sz, aug_tfms=[RandomFlip()], pad=sz//8)
return ImageClassifierData.from_paths(PATH, val_name='test', tfms=tfms, bs=bs)
bs=256
data = get_data(32,4)
x,y = next(iter(data.trn_dl))
plt.imshow(data.trn_ds.denorm(x)[0])
plt.imshow(data.trn_ds.denorm(x)[1])
plt.imshow(data.val_ds.denorm(x)[0])
data = get_data(32, bs)
lr = 1e-2
class SimpleNet(nn.Module):
def __init__(self, layers):
super().__init__()
self.layers = nn.ModuleList([
nn.Linear(layers[i], layers[i+1]) for i in range(len(layers) -1 )])
def forward(self, x):
x = x.view(x.size(0), -1) # input tensor shape
for λ in self.layers: # for each layer
λ_x = λ(x) # its output is itself acted upon the input
x = F.relu(λ_x) # "nonlinearity" (activation function)
return F.log_softmax(λ_x, dim=1) # output activation function
# make x a 1x1x32x32 Tensor
x = torch.autograd.Variable(torch.ones(1,1,32,32), requires_grad=True)
print(x.size())
# reshape x to a shape compatible with the size of its 1st dimension
# here: `-1` infers compatible size of dim2 given dim1=x.size(0)=1
# ==> returns a 1x1024 Tensor (32*32 = 1024)
x = x.view(x.size(0), -1)
print(x.size())
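# Added note: for the real CIFAR batches the input is bs x 3 x 32 x 32, so the
# flattened vector has V = 3*32*32 = 3072 entries per image - which is why the
# first SimpleNet layer below is nn.Linear(32*32*3, 40).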
layers = [32*32*3, 40, 10]
print([[layers[i], layers[i+1] ]for i in range(len(layers)-1)])
torch.nn.ModuleList([
nn.Linear(layers[i], layers[i+1]) for i in range(len(layers) -1)
])
# using the Fast.ai library to do its black magic
# by passing our SimpleNet class into fastai.conv_learner.ConvLearner
learner = ConvLearner.from_model_data(SimpleNet([32*32*3, 40, 10]), data)
print(learner)
print([par.numel() for par in learner.model.parameters()])
learner.lr_find()
learner.sched.plot()
# train learner at learning rate λr for 2 cycles
%time learner.fit(lr, 2)
# train learner at λr, for 2 cycles, with a cycle length of 1
%time learner.fit(lr, 2, cycle_len=1)
class ConvNet(nn.Module):
def __init__(self, layers, c): # c: num classes
super().__init__()
self.layers = nn.ModuleList([
nn.Conv2d(layers[i], layers[i+1], kernel_size=3, stride=2)
for i in range(len(layers) - 1)
])
self.pool = nn.AdaptiveMaxPool2d(1) # define pooling layer to use in forward()
self.out = nn.Linear(layers[-1], c) # final FC/Lin layer
def forward(self, x):
for λ in self.layers:
x = F.relu(λ(x)) # same as λ_x=λ(x); x=F.relu(λ_x)
x = self.pool(x) # I guess only 1 Conv layer so only 1 Pool at end?
x = x.view(x.size(0), -1) # trsfm tensor for final Linlayer
return F.log_softmax(self.out(x), dim=-1)
learner = ConvLearner.from_model_data(ConvNet([3, 20, 40, 80], 10), data)
learner.summary()
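# Quick standalone check (my own addition, assuming PyTorch >= 0.4): the
# nn.AdaptiveMaxPool2d(1) layer keeps only the max of each feature map, so a
# batch x 80 x 3 x 3 activation becomes batch x 80 x 1 x 1 whatever the input size.
import torch
import torch.nn as nn
demo = torch.randn(2, 80, 3, 3)                    # pretend activations from the last conv
pooled = nn.AdaptiveMaxPool2d(1)(demo)             # -> torch.Size([2, 80, 1, 1])
print(pooled.shape, pooled.view(pooled.size(0), -1).shape)  # flattened for the Linear layer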
learner.lr_find()
learner.sched.plot()
%time learner.fit(1e-1, 2)
%time learner.fit(1e-1, 4, cycle_len=1)
class ConvLayer(nn.Module):
def __init__(self, ni, nf):
super().__init__()
self.conv = nn.Conv2d(ni, nf, kernel_size=3, stride=2, padding=1)
def forward(self, x):
return F.relu(self.conv(x))
class ConvNet2(nn.Module):
def __init__(self, layers, c):
super().__init__() # torch.nn.Module inheritence
self.layers = nn.ModuleList([ConvLayer(layers[i], layers[i+1])
for i in range(len(layers) - 1)])
self.out = nn.Linear(layers[-1], c)
def forward(self, x):
for λ in self.layers:
x = λ(x) # yet another way to do x=F.relu(λ(x)) | λ_x=λ(x);x=F.relu(λ_x)
x = F.adaptive_max_pool2d(x, 1)
x = x.view(x.size(0), -1)
return F.log_softmax(self.out(x), dim=1)
learner = ConvLearner.from_model_data(ConvNet2([3, 20, 40, 80], 10), data)
learner.summary()
learner
%time learner.fit(1e-1, 2)
%time learner.fit(1e-1, 2, cycle_len=1)
class BnLayer(nn.Module):
def __init__(self, ni, nf, stride=2, kernel_size=3):
super().__init__()
self.conv = nn.Conv2d(ni, nf, kernel_size=kernel_size, stride=stride,
bias=False, padding=1)
self.a = nn.Parameter(torch.zeros(nf, 1, 1)) # adder
self.m = nn.Parameter(torch.ones(nf, 1, 1)) # multiplier
def forward(self, x):
x = F.relu(self.conv(x))
x_chan = x.transpose(0,1).contiguous().view(x.size(1), -1)
if self.training: # true during training; false during evaluation
self.means = x_chan.mean(1)[:, None, None] # calc mean of each conv filter (channel)
self.stds = x_chan.std(1)[:, None, None] # calc stdev ofeach conv filter (channel)
return (x - self.means) / self.stds*self.m + self.a # subtract means, divide by stdevs
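# Added note: when self.training is False, the means/stds computed on the last
# training batch are simply reused, a simplified stand-in for the running
# statistics that a real nn.BatchNorm2d layer keeps.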
class ConvBnNet(nn.Module):
def __init__(self, layers, c):
super().__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5, stride=1, padding=2)
self.layers = nn.ModuleList([BnLayer(layers[i], layers[i+1])
for i in range(len(layers) - 1)])
self.out = nn.Linear(layers[-1], c)
def forward(self, x):
x = self.conv1(x)
for λ in self.layers:
x = λ(x)
x = F.adaptive_max_pool2d(x, 1)
x = x.view(x.size(0), -1)
return F.log_softmax(self.out(x), dim=-1)
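# Sanity check (added, not part of the original lesson; assumes PyTorch >= 0.4):
# with stride 1 and padding = (kernel_size - 1) // 2 = 2 for the 5x5 input conv
# above, the output size is (32 - 5 + 2*2)/1 + 1 = 32, so 32x32 inputs stay 32x32.
rich_input = nn.Conv2d(3, 10, kernel_size=5, stride=1, padding=2)   # illustrative name
print(rich_input(torch.randn(1, 3, 32, 32)).shape)                  # torch.Size([1, 10, 32, 32])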
# 5 conv layers + 1 FC layer
learner = ConvLearner.from_model_data(ConvBnNet([10,20,40,80,160], 10), data)
learner.summary()
%time learner.fit(3e-2, 2)
%time learner.fit(1e-1, 4, cycle_len=1)
class ConvBnNet2(nn.Module):
def __init__(self, layers, c):
super().__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5, stride=1, padding=2)
self.layers = nn.ModuleList([BnLayer(layers[i], layers[i+1])
for i in range(len(layers) - 1)])
self.layers2 = nn.ModuleList([BnLayer(layers[i+1], layers[i+1], 1)
for i in range(len(layers) - 1)])
self.out = nn.Linear(layers[-1], c)
def forward(self, x):
x = self.conv1(x)
for λ, λ2 in zip(self.layers, self.layers2):
x = λ(x)
x = λ2(x)
x = F.adaptive_max_pool2d(x, 1)
x = x.view(x.size(0), -1)
return F.log_softmax(self.out(x), dim=-1)
# creates a 12-layer ConvNet. 1 larger Conv in, 10 conv middle, 1 Lin out.
learner = ConvLearner.from_model_data(ConvBnNet2([10, 20, 40, 80, 160], 10), data)
%time learner.fit(1e-2,2)
class ResnetLayer(BnLayer):
def forward(self, x):
return x + super().forward(x)
# forward fn of ResNetLayer is to return the input along with
# the output of the inherited forward fn from BnLayer, applied to
# the input.
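# For comparison only - a sketch added here, not used by the models below: a
# "full" residual block as in the original ResNet paper puts *two* convolutions
# inside the skip connection, i.e. out = x + conv2(relu(conv1(x))), rather than
# the single BnLayer used by ResnetLayer above. It relies on the same nn / F
# imports as the surrounding classes.
class FullResBlock(nn.Module):
    def __init__(self, nf):
        super().__init__()
        self.conv1 = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1)
    def forward(self, x):
        return x + self.conv2(F.relu(self.conv1(x)))  # identity shortcut around 2 convs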
class ResNet(nn.Module):
def __init__(self, layers, c):
super().__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5, stride=1, padding=2)
self.layers = nn.ModuleList([BnLayer(layers[i], layers[i+1])
for i in range(len(layers) - 1)])
self.layers2 = nn.ModuleList([ResnetLayer(layers[i+1], layers[i+1], 1)
for i in range(len(layers) - 1)])
self.layers3 = nn.ModuleList([ResnetLayer(layers[i+1], layers[i+1], 1)
for i in range(len(layers) - 1)])
self.out = nn.Linear(layers[-1], c)
def forward(self, x):
x = self.conv1(x)
for λ, λ2, λ3 in zip(self.layers, self.layers2, self.layers3):
            x = λ3(λ2(λ(x)))
x = F.adaptive_max_pool2d(x, 1)
x = x.view(x.size(0), -1)
return F.log_softmax(self.out(x), dim=-1)
learner = ConvLearner.from_model_data(ResNet([10,20,40,80,160],10), data)
# weight decay
wd = 1e-5
%time learner.fit(1e-2, 2, wds=wd)
%time learner.fit(1e-2, 3, cycle_len=1, cycle_mult=2, wds=wd)
%time learner.fit(1e-2, 8, cycle_len=4, wds=wd)
class ResNet2(nn.Module):
def __init__(self, layers, c, p=0.5):
super().__init__()
self.conv1 = BnLayer(3, 16, stride=1, kernel_size=7)
self.layers = nn.ModuleList([BnLayer(layers[i], layers[i+1])
for i in range(len(layers) - 1)])
self.layers2= nn.ModuleList([ResnetLayer(layers[i+1],layers[i+1], 1)
for i in range(len(layers) - 1)])
self.layers3= nn.ModuleList([ResnetLayer(layers[i+1],layers[i+1], 1)
for i in range(len(layers) - 1)])
self.out = nn.Linear(layers[-1], c)
self.drop= nn.Dropout(p)
def forward(self, x):
x = self.conv1(x)
for λ, λ2, λ3 in zip(self.layers, self.layers2, self.layers3):
x = λ3(λ2(λ(x)))
x = F.adaptive_max_pool2d(x, 1)
x = x.view(x.size(0), -1)
x = self.drop(x)
return F.log_softmax(self.out(x), dim=-1)
learner = ConvLearner.from_model_data(ResNet2([16,32,128,256], 10, 0.2), data)
wd=1e-6
%time learner.fit(1e-2, 2, wds=wd)
%time learner.fit(1e-2, 3, cycle_len=1, cycle_mult=2, wds=wd)
%time learner.fit(1e-2, 8, cycle_len=4, wds=wd)
learner.save('tmp')
log_preds, y = learner.TTA()
preds = np.mean(np.exp(log_preds), 0)
metrics.log_loss(y, preds), accuracy(preds, y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Protocol is not the same for XL320 servomotors, set the using_XL320 flag to True if you use them.
Step2: If the code below gives you an exception, try to restart all other notebooks that may be running, wait 5 seconds and try again.
Step3: Kill whatever process is using the ports (use this only as a last resort to free the ports).
|
<ASSISTANT_TASK:>
Python Code:
import pypot.dynamixel
ports = pypot.dynamixel.get_available_ports()
if not ports:
raise IOError('no port found!')
print 'ports found', ports
using_XL320 = False
my_baudrate = 1000000
for port in ports:
print port
try:
if using_XL320:
dxl_io = pypot.dynamixel.Dxl320IO(port, baudrate=my_baudrate)
else:
dxl_io = pypot.dynamixel.DxlIO(port, baudrate=my_baudrate)
print "scanning"
found = dxl_io.scan(range(60))
print found
dxl_io.close()
except Exception, e:
print e
import os
for port in ports:
os.system('fuser -k '+port);
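# Hypothetical follow-up (not part of the original steps): once IDs are found, the
# same IO class can read motor positions, e.g. (names as above, Python 2 style):
# dxl_io = pypot.dynamixel.DxlIO(ports[0], baudrate=my_baudrate)
# print dxl_io.get_present_position(found)
# dxl_io.close()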
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load forward solution and inverse operator
Step2: Read and organise labels for cortical parcellation
Step3: Compute point-spread function summaries (PCA) for all labels
Step4: We can show the explained variances of principal components per label. Note the differences in explained variance across labels, probably due to their different spatial extents.
Step5: The output shows the summed variance explained by the first five principal components.
Step6: Most leakage occurs for neighbouring regions, but also for deeper regions
Step7: Point-spread function for the lateral occipital label in the left hemisphere
Step8: and in the right hemisphere.
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Olaf Hauk <olaf.hauk@mrc-cbu.cam.ac.uk>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Nicolas P. Rougier (graph code borrowed from his matplotlib gallery)
#
# License: BSD-3-Clause
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import (read_inverse_operator,
make_inverse_resolution_matrix,
get_point_spread)
from mne.viz import circular_layout
from mne_connectivity.viz import plot_connectivity_circle
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-fixed-inv.fif'
forward = mne.read_forward_solution(fname_fwd)
# Convert forward solution to fixed source orientations
mne.convert_forward_solution(
forward, surf_ori=True, force_fixed=True, copy=False)
inverse_operator = read_inverse_operator(fname_inv)
# Compute resolution matrices for MNE
rm_mne = make_inverse_resolution_matrix(forward, inverse_operator,
method='MNE', lambda2=1. / 3.**2)
src = inverse_operator['src']
del forward, inverse_operator # save memory
labels = mne.read_labels_from_annot('sample', parc='aparc',
subjects_dir=subjects_dir)
n_labels = len(labels)
label_colors = [label.color for label in labels]
# First, we reorder the labels based on their location in the left hemi
label_names = [label.name for label in labels]
lh_labels = [name for name in label_names if name.endswith('lh')]
# Get the y-location of the label
label_ypos = list()
for name in lh_labels:
idx = label_names.index(name)
ypos = np.mean(labels[idx].pos[:, 1])
label_ypos.append(ypos)
# Reorder the labels based on their location
lh_labels = [label for (yp, label) in sorted(zip(label_ypos, lh_labels))]
# For the right hemi
rh_labels = [label[:-2] + 'rh' for label in lh_labels]
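# Hedged sanity check (not in the original example): every mirrored right-hemisphere
# name should exist in the parcellation read above.
# assert all(name in label_names for name in rh_labels)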
# Compute first PCA component across PSFs within labels.
# Note the differences in explained variance, probably due to different
# spatial extents of labels.
n_comp = 5
stcs_psf_mne, pca_vars_mne = get_point_spread(
rm_mne, src, labels, mode='pca', n_comp=n_comp, norm=None,
return_pca_vars=True)
n_verts = rm_mne.shape[0]
del rm_mne
with np.printoptions(precision=1):
for [name, var] in zip(label_names, pca_vars_mne):
print(f'{name}: {var.sum():.1f}% {var}')
# get PSFs from Source Estimate objects into matrix
psfs_mat = np.zeros([n_labels, n_verts])
# Leakage matrix for MNE, get first principal component per label
for [i, s] in enumerate(stcs_psf_mne):
psfs_mat[i, :] = s.data[:, 0]
# Compute label-to-label leakage as Pearson correlation of PSFs
# Sign of correlation is arbitrary, so take absolute values
leakage_mne = np.abs(np.corrcoef(psfs_mat))
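# Hypothetical follow-up: the strongest leakage targets for any one label can be read
# off this matrix directly, e.g. the five labels most correlated with the first label:
# strongest = np.argsort(leakage_mne[0])[::-1][1:6]   # index 0 is the self-correlation
# print([label_names[i] for i in strongest])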
# Save the plot order and create a circular layout
node_order = lh_labels[::-1] + rh_labels # mirror label order across hemis
node_angles = circular_layout(label_names, node_order, start_pos=90,
group_boundaries=[0, len(label_names) / 2])
# Plot the graph using node colors from the FreeSurfer parcellation. We only
# show the 200 strongest connections.
fig = plt.figure(num=None, figsize=(8, 8), facecolor='black')
plot_connectivity_circle(leakage_mne, label_names, n_lines=200,
node_angles=node_angles, node_colors=label_colors,
title='MNE Leakage', fig=fig)
# left and right lateral occipital
idx = [22, 23]
stc_lh = stcs_psf_mne[idx[0]]
stc_rh = stcs_psf_mne[idx[1]]
# Maximum for scaling across plots
max_val = np.max([stc_lh.data, stc_rh.data])
brain_lh = stc_lh.plot(subjects_dir=subjects_dir, subject='sample',
hemi='both', views='caudal',
clim=dict(kind='value',
pos_lims=(0, max_val / 2., max_val)))
brain_lh.add_text(0.1, 0.9, label_names[idx[0]], 'title', font_size=16)
brain_rh = stc_rh.plot(subjects_dir=subjects_dir, subject='sample',
hemi='both', views='caudal',
clim=dict(kind='value',
pos_lims=(0, max_val / 2., max_val)))
brain_rh.add_text(0.1, 0.9, label_names[idx[1]], 'title', font_size=16)
<END_TASK>
|