Unnamed: 0
int64 0
15.9k
| cleaned_code
stringlengths 67
124k
⌀ | cleaned_prompt
stringlengths 168
30.3k
⌀ |
|---|---|---|
14,200
|
<ASSISTANT_TASK:>
Python Code:
# Generate fixture data for Keras.js GRU-layer tests.
#
# Each simulation builds a one-layer GRU model, replaces its weights with
# seeded uniform(-1, 1) values, runs a seeded random input through it, and
# records the (input, weights, expected output) triple in the module-level
# DATA dict (defined in an earlier cell). The nine cases differ only in
# layer configuration, input shape and seed, so the shared procedure is
# factored into a single helper instead of nine copy-pasted cells.

def run_gru_simulation(name, seed, data_in_shape, gru_kwargs):
    """Run one GRU fixture simulation and store the result in DATA[name].

    name          -- key under which the fixture is stored in DATA.
    seed          -- base seed; weight i is drawn with seed + i so every
                     fixture is reproducible.
    data_in_shape -- (timesteps, features) of the input sequence.
    gru_kwargs    -- keyword arguments forwarded to the GRU constructor.
    """
    stateful = gru_kwargs.get('stateful', False)
    rnn = GRU(**gru_kwargs)
    # Stateful layers require a fixed batch size; use batch size 1.
    if stateful:
        layer_0 = Input(batch_shape=(1,) + tuple(data_in_shape))
    else:
        layer_0 = Input(shape=data_in_shape)
    layer_1 = rnn(layer_0)
    model = Model(inputs=layer_0, outputs=layer_1)

    # Replace the default-initialized weights with seeded uniform(-1, 1).
    weights = []
    for i, w in enumerate(model.get_weights()):
        np.random.seed(seed + i)
        weights.append(2 * np.random.random(w.shape) - 1)
    model.set_weights(weights)

    # Without a bias there are only two weight arrays to report.
    weight_names = ['W', 'U', 'b'] if gru_kwargs.get('use_bias', True) else ['W', 'U']
    for w_i, w_name in enumerate(weight_names):
        print('{} shape:'.format(w_name), weights[w_i].shape)
        print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))

    data_in = 2 * np.random.random(data_in_shape) - 1
    result = model.predict(np.array([data_in]))
    if stateful:
        # Stateful fixtures record the prediction of a second pass so the
        # carried-over hidden state is part of the expected output.
        result = model.predict(np.array([data_in]))
    data_out_shape = result[0].shape
    data_in_formatted = format_decimal(data_in.ravel().tolist())
    data_out_formatted = format_decimal(result[0].ravel().tolist())
    print('')
    print('in shape:', data_in_shape)
    print('in:', data_in_formatted)
    print('out shape:', data_out_shape)
    print('out:', data_out_formatted)
    DATA[name] = {
        'input': {'data': data_in_formatted, 'shape': data_in_shape},
        'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
        'expected': {'data': data_out_formatted, 'shape': data_out_shape}
    }

# (name, base seed, input shape, GRU configuration) for the nine fixtures.
SIMULATIONS = [
    ('recurrent.GRU.0', 3200, (3, 6),
     dict(units=4, activation='tanh', recurrent_activation='hard_sigmoid')),
    ('recurrent.GRU.1', 3300, (8, 5),
     dict(units=5, activation='sigmoid', recurrent_activation='sigmoid')),
    ('recurrent.GRU.2', 3400, (3, 6),
     dict(units=4, activation='tanh', recurrent_activation='hard_sigmoid',
          return_sequences=True)),
    ('recurrent.GRU.3', 3410, (3, 6),
     dict(units=4, activation='tanh', recurrent_activation='hard_sigmoid',
          return_sequences=False, go_backwards=True)),
    ('recurrent.GRU.4', 3420, (3, 6),
     dict(units=4, activation='tanh', recurrent_activation='hard_sigmoid',
          return_sequences=True, go_backwards=True)),
    ('recurrent.GRU.5', 3430, (3, 6),
     dict(units=4, activation='tanh', recurrent_activation='hard_sigmoid',
          return_sequences=False, go_backwards=False, stateful=True)),
    ('recurrent.GRU.6', 3440, (3, 6),
     dict(units=4, activation='tanh', recurrent_activation='hard_sigmoid',
          return_sequences=True, go_backwards=False, stateful=True)),
    ('recurrent.GRU.7', 3450, (3, 6),
     dict(units=4, activation='tanh', recurrent_activation='hard_sigmoid',
          return_sequences=False, go_backwards=True, stateful=True)),
    ('recurrent.GRU.8', 3460, (3, 6),
     dict(units=4, activation='tanh', recurrent_activation='hard_sigmoid',
          use_bias=False, return_sequences=True, go_backwards=True, stateful=True)),
]

for sim_name, sim_seed, sim_shape, sim_kwargs in SIMULATIONS:
    run_gru_simulation(sim_name, sim_seed, sim_shape, sim_kwargs)

# Export the collected fixtures for the Keras.js test suite.
import os
import json
filename = '../../../test/data/layers/recurrent/GRU.json'
if not os.path.exists(os.path.dirname(filename)):
    os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
    json.dump(DATA, f)
print(json.dumps(DATA))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: [recurrent.GRU.1] units=5, activation='sigmoid', recurrent_activation='sigmoid'
Step2: [recurrent.GRU.2] units=4, activation='tanh', recurrent_activation='hard_sigmoid', return_sequences=True
Step3: [recurrent.GRU.3] units=4, activation='tanh', recurrent_activation='hard_sigmoid', return_sequences=False, go_backwards=True
Step4: [recurrent.GRU.4] units=4, activation='tanh', recurrent_activation='hard_sigmoid', return_sequences=True, go_backwards=True
Step5: [recurrent.GRU.5] units=4, activation='tanh', recurrent_activation='hard_sigmoid', return_sequences=False, go_backwards=False, stateful=True
Step6: [recurrent.GRU.6] units=4, activation='tanh', recurrent_activation='hard_sigmoid', return_sequences=True, go_backwards=False, stateful=True
Step7: [recurrent.GRU.7] units=4, activation='tanh', recurrent_activation='hard_sigmoid', return_sequences=False, go_backwards=True, stateful=True
Step8: [recurrent.GRU.8] units=4, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=False, return_sequences=True, go_backwards=True, stateful=True
Step9: export for Keras.js tests
|
14,201
|
<ASSISTANT_TASK:>
Python Code:
# Load the reference dress SFrame, preferring the local cached copy and
# falling back to the hosted copy on static.turi.com (cached after the
# first download).
if os.path.exists('dress_sf_processed.sf'):
    reference_sf = graphlab.SFrame('dress_sf_processed.sf')
else:
    reference_sf = graphlab.SFrame('https://static.turi.com/datasets/dress_sf_processed.sf')
    reference_sf.save('dress_sf_processed.sf')
# Nearest-neighbor model trained on dress image features.
if os.path.exists('dress_nn_model'):
    nn_model = graphlab.load_model('dress_nn_model')
else:
    nn_model = graphlab.load_model('https://static.turi.com/models/dress_nn_model')
    nn_model.save('dress_nn_model')
# Pretrained ImageNet model used only as a deep-feature extractor.
if os.path.exists('imagenet_model'):
    pretrained_model = graphlab.load_model('imagenet_model')
else:
    pretrained_model = graphlab.load_model('https://static.turi.com/models/imagenet_model_iter45')
    pretrained_model.save('imagenet_model')
# Display the model and reference data in the notebook.
pretrained_model
reference_sf
def dress_similar(url):
    """Return the 5 nearest-neighbor dresses for the image at *url*.

    Wraps the image in a one-row SFrame, extracts deep features with the
    pretrained ImageNet model, and queries the nearest-neighbor model.
    """
    query_frame = graphlab.SFrame()
    query_frame['image'] = [graphlab.Image(url)]
    query_frame['features'] = pretrained_model.extract_features(query_frame)
    return nn_model.query(query_frame, k=5)
# Example query image; Image() (IPython.display) shows it inline.
QUERY_URL = 'http://static.ddmcdn.com/gif/blue-dress.jpg'
Image(QUERY_URL)
def retrieve_image(nearest_neighbors_output, input_sframe):
    """Join nearest-neighbor results back onto the input frame and return
    the matched images, ordered by nearest-neighbor rank."""
    matched = input_sframe.join(nearest_neighbors_output, on={'_id': 'reference_label'})
    return matched.sort('rank')['image']
# Find and display the five dresses most similar to the query image.
images = retrieve_image(dress_similar(QUERY_URL), reference_sf)
images.show()
import graphlab as gl
# Replace with your path.
ps_state_path = 's3://<your-bucket-name>/predictive_service/ps'
# Set your AWS credentials.
# NOTE(review): the <key>/<secret> and TBD tokens in this cell are
# placeholders -- the cell is not runnable until they are filled in.
gl.aws.set_credentials(<key>, <secret>)
# Create an EC2 config
ec2_config = gl.deploy.Ec2Config()
# Launch a predictive service
ps = gl.deploy.predictive_service.create(name = 'sklearn-predictive-service',
                                         ec2_config = ec2_config, state_path = ps_state_path, num_hosts = 1)
import graphlab as gl
# Load an already-created service (fill in the state path).
ps = gl.deploy.predictive_service.load(TBD)
ps
# Uncomment to register (or update) the endpoint before deploying.
#ps.add('dress_similar', dress_similar)
#ps.update('dress_similar', dress_similar)
ps.apply_changes()
ps.query('dress_similar', url=QUERY_URL)
import json
import requests
from requests.auth import HTTPBasicAuth
def restful_query(url):
    """POST *url* to the deployed dress_similar endpoint and return the
    decoded JSON response.

    The original signature took a parameter ``x`` that was never used
    while the body referenced an undefined ``url`` (NameError); the
    parameter is now named ``url``.
    """
    headers = {'content-type': 'application/json'}
    payload = {'data': {'url': url} }
    # TBD placeholders: fill in the service DNS name and API key.
    end_point = 'http://TBD/query/dress_similar'
    return requests.post(
        end_point,
        json.dumps(payload),
        headers=headers,
        auth=HTTPBasicAuth('api_key', TBD)).json()
restful_query('http://static.ddmcdn.com/gif/blue-dress.jpg').show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Create a Predictive Service (One time) <a id='create'></a>
Step2: Load an already created service
Step3: Query the model <a id='query'></a>
Step4: Query via REST
|
14,202
|
<ASSISTANT_TASK:>
Python Code:
import re
# Combined-log-format parser: named groups for each Apache access-log field.
format_pat= re.compile(
    r"(?P<host>[\d\.]+)\s"
    r"(?P<identity>\S*)\s"
    r"(?P<user>\S*)\s"
    r"\[(?P<time>.*?)\]\s"
    r'"(?P<request>.*?)"\s'
    r"(?P<status>\d+)\s"
    r"(?P<bytes>\S*)\s"
    r'"(?P<referer>.*?)"\s'
    r'"(?P<user_agent>.*?)"\s*'
)
logPath = "E:\\sundog-consult\\Udemy\\DataScience\\access_log.txt"
# First attempt: count every request URL and print the top 20.
URLCounts = {}
with open(logPath, "r") as f:
    for line in (l.rstrip() for l in f):
        match= format_pat.match(line)
        if match:
            access = match.groupdict()
            request = access['request']
            # This unpacking assumes every request has exactly three
            # fields and raises on malformed log lines -- that failure is
            # demonstrated and handled in the cells below.
            (action, URL, protocol) = request.split()
            # dict.has_key() was removed in Python 3; use the `in`
            # operator, matching the later cells of this analysis.
            if URL in URLCounts:
                URLCounts[URL] = URLCounts[URL] + 1
            else:
                URLCounts[URL] = 1
results = sorted(URLCounts, key=lambda i: int(URLCounts[i]), reverse=True)
for result in results[:20]:
    print(result + ": " + str(URLCounts[result]))
# Step 2: find out why the naive parser crashed -- print every request
# field that does not split into exactly (action, URL, protocol).
URLCounts = {}
with open(logPath, "r") as f:
    for line in (l.rstrip() for l in f):
        match= format_pat.match(line)
        if match:
            access = match.groupdict()
            request = access['request']
            fields = request.split()
            if (len(fields) != 3):
                print(fields)
# Step 3: count URLs again, this time skipping malformed request fields.
URLCounts = {}
with open(logPath, "r") as f:
    for line in (l.rstrip() for l in f):
        match= format_pat.match(line)
        if match:
            access = match.groupdict()
            request = access['request']
            fields = request.split()
            if (len(fields) == 3):
                URL = fields[1]
                if URL in URLCounts:
                    URLCounts[URL] = URLCounts[URL] + 1
                else:
                    URLCounts[URL] = 1
results = sorted(URLCounts, key=lambda i: int(URLCounts[i]), reverse=True)
for result in results[:20]:
    print(result + ": " + str(URLCounts[result]))
# Step 4: restrict the counts to GET requests only.
URLCounts = {}
with open(logPath, "r") as f:
    for line in (l.rstrip() for l in f):
        match= format_pat.match(line)
        if match:
            access = match.groupdict()
            request = access['request']
            fields = request.split()
            if (len(fields) == 3):
                (action, URL, protocol) = fields
                if (action == 'GET'):
                    if URL in URLCounts:
                        URLCounts[URL] = URLCounts[URL] + 1
                    else:
                        URLCounts[URL] = 1
results = sorted(URLCounts, key=lambda i: int(URLCounts[i]), reverse=True)
for result in results[:20]:
    print(result + ": " + str(URLCounts[result]))
# Step 5: tally user-agent strings to spot robots polluting the data.
UserAgents = {}
with open(logPath, "r") as f:
    for line in (l.rstrip() for l in f):
        match= format_pat.match(line)
        if match:
            access = match.groupdict()
            agent = access['user_agent']
            if agent in UserAgents:
                UserAgents[agent] = UserAgents[agent] + 1
            else:
                UserAgents[agent] = 1
results = sorted(UserAgents, key=lambda i: int(UserAgents[i]), reverse=True)
for result in results:
    print(result + ": " + str(UserAgents[result]))
# Step 6: drop the obvious bots/spiders and missing ('-') user agents.
URLCounts = {}
with open(logPath, "r") as f:
    for line in (l.rstrip() for l in f):
        match= format_pat.match(line)
        if match:
            access = match.groupdict()
            agent = access['user_agent']
            if (not('bot' in agent or 'spider' in agent or
                    'Bot' in agent or 'Spider' in agent or
                    'W3 Total Cache' in agent or agent =='-')):
                request = access['request']
                fields = request.split()
                if (len(fields) == 3):
                    (action, URL, protocol) = fields
                    if (action == 'GET'):
                        if URL in URLCounts:
                            URLCounts[URL] = URLCounts[URL] + 1
                        else:
                            URLCounts[URL] = 1
results = sorted(URLCounts, key=lambda i: int(URLCounts[i]), reverse=True)
for result in results[:20]:
    print(result + ": " + str(URLCounts[result]))
# Step 7: keep only real pages -- on this site, URLs ending in '/'.
URLCounts = {}
with open(logPath, "r") as f:
    for line in (l.rstrip() for l in f):
        match= format_pat.match(line)
        if match:
            access = match.groupdict()
            agent = access['user_agent']
            if (not('bot' in agent or 'spider' in agent or
                    'Bot' in agent or 'Spider' in agent or
                    'W3 Total Cache' in agent or agent =='-')):
                request = access['request']
                fields = request.split()
                if (len(fields) == 3):
                    (action, URL, protocol) = fields
                    if (URL.endswith("/")):
                        if (action == 'GET'):
                            if URL in URLCounts:
                                URLCounts[URL] = URLCounts[URL] + 1
                            else:
                                URLCounts[URL] = 1
results = sorted(URLCounts, key=lambda i: int(URLCounts[i]), reverse=True)
for result in results[:20]:
    print(result + ": " + str(URLCounts[result]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here's the full path to the log file I'm analyzing; change this if you want to run this stuff yourself
Step2: Now we'll whip up a little script to extract the URL in each access, and use a dictionary to count up the number of times each one appears. Then we'll sort it and print out the top 20 pages. What could go wrong?
Step3: Hm. The 'request' part of the line is supposed to look something like this
Step4: Huh. In addition to empty fields, there's one that just contains garbage. Well, let's modify our script to check for that case
Step5: It worked! But, the results don't really make sense. What we really want is pages accessed by real humans looking for news from our little news site. What the heck is xmlrpc.php? A look at the log itself turns up a lot of entries like this
Step6: That's starting to look better. But, this is a news site - are people really reading the little blog on it instead of news pages? That doesn't make sense. Let's look at a typical /blog/ entry in the log
Step7: Yikes! In addition to '-', there are also a million different web robots accessing the site and polluting my data. Filtering out all of them is really hard, but getting rid of the ones significantly polluting my data in this case should be a matter of getting rid of '-', anything containing "bot" or "spider", and W3 Total Cache.
Step8: Now, our new problem is that we're getting a bunch of hits on things that aren't web pages. We're not interested in those, so let's filter out any URL that doesn't end in / (all of the pages on my site are accessed in that manner - again this is applying knowledge about my data to the analysis!)
|
14,203
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table, vstack
from astropy.io import fits
import multiprocessing
nproc = multiprocessing.cpu_count() // 2
from desispec.io.util import write_bintable
from desitarget
from desiutil.log import get_logger, DEBUG
log = get_logger()
if 'HOSTNAME' in os.environ.keys() and os.environ['HOSTNAME'] == 'cori19':
#import desitarget_fix_densities as desitarget
from desitarget_fix_densities.cuts import isBGS_bright, isBGS_faint
from desitarget_fix_densities.mock.mockmaker import BGSMaker
from desitarget_fix_densities.mock.mockmaker import SKYMaker
os.environ['DESITARGET'] == os.path.join(os.curdir,'desitarget_fix_densities')
else:
from desitarget.cuts import isBGS_bright, isBGS_faint
from desitarget.mock.mockmaker import BGSMaker
from desitarget.mock.mockmaker import SKYMaker
import seaborn as sns
sns.set(style='white', font_scale=1.1, palette='deep')
%matplotlib inline
# Simulation output root and SFD dust-map location.
simdir = os.path.join(os.getenv('DESI_ROOT'), 'spectro', 'sim', 'bgs')
dust_dir = '/Users/ioannis/research/data/sfd_dustmaps/maps'
healpixel = 26030
nside = 64
seed = 555
rand = np.random.RandomState(seed)
# Toggle re-generation of each pipeline stage.
overwrite_spectra = False
overwrite_redshifts = False
overwrite_results = False
# Vary galaxy properties with nominal observing conditions but split
# the sample into nsim chunks to avoid memory issues.
sim1 = dict(suffix='sim01',
            use_mock=True,
            nsim=10,
            nspec=100,
            seed=11,
            )
#from desisim.simexp import reference_conditions
#ref_obsconditions = reference_conditions['BGS']
# Nominal BGS observing conditions (dark moon, 300 s exposure).
ref_obsconditions = {'AIRMASS': 1.0, 'EXPTIME': 300, 'SEEING': 1.1, 'MOONALT': -60, 'MOONFRAC': 0.0, 'MOONSEP': 180}
print(ref_obsconditions)
# Stage 1: simulate the BGS spectra.
from desistudy import bgs_sim_spectra
for sim in np.atleast_1d(sim1):
    bgs_sim_spectra(sim, verbose=False, overwrite=overwrite_spectra)
# Stage 2: fit redshifts.
from desistudy import bgs_redshifts
for sim in np.atleast_1d(sim1):
    bgs_redshifts(sim, overwrite=overwrite_redshifts)
# Stage 3: gather the per-target results into one table.
from desistudy import bgs_gather_results
for sim in np.atleast_1d(sim1):
    bgs_gather_results(sim, overwrite=overwrite_results)
# Read the results table back for QA.
sim = sim1
resultfile = os.path.join(simdir, sim['suffix'], 'bgs-{}-results.fits'.format(sim['suffix']))
log.info('Reading {}'.format(resultfile))
result = Table.read(resultfile)
result
# QA plots: redshift vs magnitude, efficiency, and ZWARN flag summary.
from desistudy import qa_zmag, qa_efficiency, qa_zwarn4, qa_radec
qa_zmag(result['ZTRUE'], result['RMAG'], maglabel=r'$r_{\rm DECaLS}$ (AB mag)', faintmag=20.0)
qa_efficiency(result)
qa_zwarn4(result)
#bgsmaker = BGSMaker(seed=seed)
#log.info('Reading the mock catalog for {}s'.format(bgsmaker.objtype))
#data = {'TARGET_NAME': target_name_list, 'MOCKFORMAT': 'gaussianfield',
# 'OBJID': objid_list, 'MOCKID': mockid_list, 'BRICKNAME': brickname_list,
# 'RA': ra_list, 'DEC': dec_list, 'Z': zz_list,'FILES': files_list, 'N_PER_FILE': n_per_file}
## If you want to start from an existing default dictionary, you could use:
## tdata = bgsmaker.read(healpixels=healpixel, nside=nside, dust_dir=dust_dir)
## then either overwrite the relevant details (e.g. z, mag, ra, dec, ids)
## or actually use the defaults for your analysis
## but first we have to prepare the spectra. this internal function adds required fields
## to the data dictionary that we don't want to deal with ourselves
#data = bgsmaker._prepare_spectra(data, nside_chunk=nside_chunk)
## Finally we can make the spectra
#flux, wave, meta, targets, truth = bgsmaker.make_spectra(data)
from desistudy import write_templates
# NOTE(review): `outfile`, `flux`, `wave` and `meta` are undefined as
# written -- this call only works after the commented-out template-making
# code above has been run; verify before executing.
write_templates(outfile, flux, wave, meta)
from desisim.scripts.quickspectra import sim_spectra
# NOTE(review): `bgs_write_simdata`, `simdata2obsconditions`,
# `bgs_make_templates`, `BGSmaker` and `overwrite` are not defined in the
# visible cells -- presumably provided by an unseen cell; confirm.
simdata = bgs_write_simdata(sim,rand,overwrite=overwrite_spectra)
for ii, simdata1 in enumerate(simdata):
    # Generate the observing conditions dictionary.
    obs = simdata2obsconditions(simdata1)
    # Generate the rest-frame templates. Currently not writing out the rest-frame
    # templates but we could.
    flux, wave, meta = bgs_make_templates(sim, rand, BGSmaker)
    truefile = os.path.join(simdir, sim['suffix'], 'bgs-{}-{:03}-true.fits'.format(sim['suffix'], ii))
    if overwrite or not os.path.isfile(truefile):
        write_templates(truefile, flux, wave, meta)
    spectrafile = os.path.join(simdir, sim['suffix'], 'bgs-{}-{:03}.fits'.format(sim['suffix'], ii))
    if overwrite or not os.path.isfile(spectrafile):
        sim_spectra(wave, flux, 'bgs', spectrafile, obsconditions=obs,
                    sourcetype='bgs', seed=sim['seed'], expid=ii)
    else:
        print('File {} exists...skipping.'.format(spectrafile))
# Demonstration of generating (empty) SKY spectra, currently disabled.
#SKY = SKYMaker(seed=seed)
#skydata = SKY.read(healpixels=healpixel, nside=nside, dust_dir=dust_dir)
#skyflux, skywave, skymeta, skytargets, skytruth = SKY.make_spectra(skydata)
#SKY.select_targets(skytargets, skytruth)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Establish the I/O path, random seed, and path to the dust maps and desired healpixel.
Step2: All or none of the output files can be overwritten using these keywords.
Step3: Set up the simulation parameters.
Step4: Generate Spectra
Step5: Fit the redshifts.
Step6: Gather the results.
Step7: Analyze the outputs.
Step8: BGS
Step9: Finally, demonstrate how to generate (empty) SKY spectra.
|
14,204
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
%matplotlib inline
# Load the Stroop data and compute the per-participant time difference
# (incongruent minus congruent); positive values mean the incongruent
# task took longer.
df = pd.read_csv('stroopdata.csv')
df['diff'] = df['Incongruent'] - df['Congruent']
df
df.describe()
# Scatter of the two conditions, then a histogram of the differences.
df.plot.scatter(x='Congruent',y='Incongruent');
(df.Incongruent - df.Congruent).plot.hist();
%%R
# Paired-sample t statistic for the Stroop differences:
# n participants, mean difference mu, standard deviation s.
n = 24
mu = 7.964792
s = 4.864827
CL = 0.95
n = 24
# z = round(qnorm((1-CL)/2, lower.tail=F),digits=2)
SE = s/sqrt(n)
t = mu/SE
# Two-sided critical t value at the 95% confidence level, df = n - 1.
t_crit = round(qt((1-CL)/2,df=n-1),digits=3)
c(t,c(-t_crit,t_crit))
%%R
# Interval around the mean difference.
# NOTE(review): ME is built from the t statistic rather than the critical
# value t_crit, so this is mu +/- t*SE -- confirm that is intended.
ME = t*SE
c(mu+ME,mu-ME)
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The experiment gives participants two tests: a congruent task and an incongruent task. In the congruent task the word and its font color agree, while in the incongruent task the word and its font color differ. Both tasks require the participants to say the displayed words aloud and press the 'Finish' button to record how long they take. The control condition is the congruent task, and the experimental condition is the incongruent task.
Step2: The measure of tendency that will be used in this situation is mean, and measure of variability is standard deviation.
Step3: The plot shown a moderaly weak correlation between congruent variable and incongruent variable.
Step4: We can see that the differences form a right-skewed distribution. This makes sense: since the congruent task is easier, no participant should finish the incongruent task faster than the congruent one, and the longer the incongruent task takes, the fewer participants we expect at that time.
Step5: Since our t-statistics, 8.02 is higher than the t critical values, we can conclude that the data provides convincing evidence that the time participants took for incongruent task is significantly different than when they took congruent task.
|
14,205
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
import numpy as np
import psycopg2
import psycopg2.extras
from itertools import chain
from collections import Counter, defaultdict
import requests
import imageio
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image, ImageDraw
from io import BytesIO
from hashlib import md5
from IPython.display import display, HTML
from itertools import chain
import mwparserfromhell
from keras.models import Model
from keras.layers import Embedding, Dense, Input, Lambda, Reshape, merge
import keras.backend as K
from keras.layers.merge import Dot
from sklearn.manifold import TSNE
# Wikipedia/Commons upload URL templates (filled with md5-based shard
# directories and the file name) and the local poster cache directory.
IMAGE_PATH_EN = 'http://upload.wikimedia.org/wikipedia/en/%s/%s/%s'
IMAGE_PATH_COMMONS = 'http://upload.wikimedia.org/wikipedia/commons/%s/%s/%s'
image_cache = 'movie_images'
def fetch_image(image_name):
    """Download a Wikipedia/Commons image into the local cache.

    Returns the normalized image name on success (the file then exists in
    ``image_cache``), or None when the image is missing, unfetchable, or
    cannot be decoded/re-encoded. ``.tiff`` images are skipped.
    """
    if not image_name or image_name.endswith('.tiff'):
        return None
    # Wikipedia file names use underscores and an upper-case first letter.
    image_name = image_name.replace(' ', '_')
    if image_name[0].upper() != image_name[0]:
        image_name = image_name.capitalize()
    file_path = os.path.join(image_cache, image_name)
    if os.path.isfile(file_path):
        return image_name
    else:
        # Upload paths are sharded by the md5 hex digest of the name:
        # .../<first hex digit>/<first two hex digits>/<name>.
        m = md5()
        m.update(image_name.encode('utf-8'))
        c = m.hexdigest()
        path = IMAGE_PATH_EN % (c[0], c[0:2], image_name)
        r = requests.get(path)
        if r.status_code == 404:
            # Not on the English wiki; try Wikimedia Commons.
            path = IMAGE_PATH_COMMONS % (c[0], c[0:2], image_name)
            r = requests.get(path)
            if r.status_code == 404:
                print image_name
                return None
        try:
            image = Image.open(BytesIO(r.content))
        except IOError:
            return None
        except ValueError:
            return None
        # Save the full-size image to the cache (Python 2 file()/print).
        image.save(file(file_path, 'w'))
        # Then re-encode a thumbnail to WEBP into an in-memory buffer --
        # presumably only to verify the image is WEBP-encodable, since
        # `res` is discarded; confirm before changing.
        image.thumbnail((240, 640), Image.ANTIALIAS)
        res = BytesIO()
        if image.mode == 'P':
            image = image.convert('RGB')
        try:
            image.save(res, 'WEBP', quality=15)
        except IOError as err:
            print image_name, err.message
            return None
        return image_name
fetch_image('Suicide Squad (film) Poster.png')
postgres_conn = psycopg2.connect('dbname=douwe user=notebook')
postgres_cursor = postgres_conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
print 'Getting top movies...'
postgres_cursor.execute(
"SELECT wikipedia.*, wikistats.viewcount FROM wikipedia "
"JOIN wikistats ON wikipedia.title = wikistats.title WHERE wikipedia.infobox = 'film' "
"ORDER BY wikistats.viewcount DESC limit 10000"
)
print 'done'
movies = []
for movie in postgres_cursor:
wikicode = mwparserfromhell.parse(movie['wikitext'])
image = None
for template in wikicode.filter_templates():
if template.name.lower().startswith('infobox '):
for param in template.params:
key = param.name.strip()
if key == 'image':
image = param.value.strip()
break
if image:
image_name = fetch_image(image)
movies.append((movie['title'], image_name, [unicode(x.title) for x in wikicode.filter_wikilinks()], movie['viewcount']))
len(movies)
# Count how often each wikilink target is referenced across all movies,
# keep only targets referenced more than once, and build integer-id
# lookup tables for references and for movie titles.
ref_counts = Counter()
for _, _, refs, _ in movies:
    ref_counts.update(refs)
all_refs = [ref for ref, count in ref_counts.items() if count > 1]
ref_to_id = {ref: idx for idx, ref in enumerate(all_refs)}
movie_to_id = {title: idx for idx, (title, _, _, _) in enumerate(movies)}
len(all_refs), len(ref_counts)
# Invert the mapping: for every kept reference, collect the ids of the
# movies that link to it.
refs_movies = defaultdict(list)
for movie, image, refs, viewcounts in movies:
    movie_id = movie_to_id[movie]
    for ref in refs:
        ref_id = ref_to_id.get(ref)
        # The original tested `if ref_id:`, which silently dropped the
        # reference with index 0; compare against None instead.
        if ref_id is not None:
            refs_movies[ref_id].append(movie_id)
refs_movies = list(refs_movies.items())
len(refs_movies)
import random
random.seed(5)
def data_generator(refs_movies, negative_ratio=5, yield_movie_pairs=True):
random.shuffle(refs_movies)
for ref, movies in refs_movies:
if yield_movie_pairs:
if len(movies) < 2: continue
a, b = random.sample(movies, 2)
else:
a = ref
b = random.choice(movies)
yield a, b, 1
seen = set(movies)
left = negative_ratio
while left > 0:
n = random.randrange(len(movie_to_id))
if not n in seen:
left -= 1
seen.add(n)
yield a, n, -1
def batchify(gen, batch_size):
ax, bx, lx = [], [], []
while True:
for a, b, label in gen():
ax.append(a)
bx.append(b)
lx.append(label)
if len(ax) > batch_size:
yield { 'first': np.asarray(ax), 'second': np.asarray(bx)}, np.asarray(lx)
del ax[:]
del bx[:]
del lx[:]
next(batchify(lambda: data_generator(refs_movies), batch_size=10))
N = 20
def model_simple():
src = Input(name='first', shape=(1,))
dst = Input(name='second', shape=(1,))
src_embedding = Embedding(name='src_embedding', input_dim=len(movie_to_id), output_dim=N)(src)
dst_embedding = Embedding(name='dst_embedding', input_dim=len(movie_to_id), output_dim=N)(dst)
dot = merge([src_embedding, dst_embedding], mode='cos')
dot = Reshape((1,))(dot)
model = Model(inputs=[src, dst], outputs=[dot])
model.compile(optimizer='nadam', loss='mse')
return model
model = model_simple()
model.fit_generator(
batchify(lambda: data_generator(refs_movies, yield_movie_pairs=True), 2048),
epochs=25,
steps_per_epoch=3500,
verbose=2
)
src = model.get_layer('src_embedding')
src_weights = src.get_weights()[0]
lens = np.linalg.norm(src_weights, axis=1)
normalized = (src_weights.T / lens).T
np.linalg.norm(normalized[0]), normalized.shape
def neighbors(movie):
dists = np.dot(normalized, normalized[movie_to_id[movie]])
closest = np.argsort(dists)[-10:]
for c in closest:
print(c, movies[c][0], dists[c])
neighbors('Star Wars (film)')
model = TSNE(n_components=2, random_state=0)
np.set_printoptions(suppress=True)
xy = model.fit_transform(normalized)
xy
plt.scatter(xy[:,0][:200], xy[:,1][:200])
plt.show()
w = 144
h = 220
res = []
sz = 100
sz_1 = sz + 1
taken = [[False] * sz_1 for _ in range(sz_1)]
x_min = xy.T[0].min()
y_min = xy.T[1].min()
x_max = xy.T[0].max()
y_max = xy.T[1].max()
img = Image.new('RGB', (sz_1 * w, sz_1 * h))
drw = ImageDraw.Draw(img)
c1 = 0
c2 = 0
for movie, coo in zip(movies, xy):
if not movie[1]:
continue
poster = Image.open(image_cache + '/' + movie[1])
poster.thumbnail((w, h), Image.ANTIALIAS)
x = int(sz * (coo[0] - x_min) / (x_max - x_min))
y = int(sz * (coo[1] - y_min) / (y_max - y_min))
if taken[x][y]:
c1 += 1
for dx, dy in (-1, 0), (2, 0), (-1, -1), (0, 2):
x += dx
y += dy
if x >= 0 and y >= 0 and x < sz_1 and y < sz_1 and not taken[x][y]:
break
else:
continue
c2 += 1
taken[x][y] = True
x *= w
y *= h
drw.rectangle((x, y, x + w, y + h), (50, 50, 50))
res.append((x, y, movie[1], poster.size[0], poster.size[1]))
img.paste(poster, (x + (w - poster.size[0]) / 2, y + (h - poster.size[1]) / 2))
img.save(open('/home/notebook/notebook/poster.png', 'wb'))
x_min, y_min, x_max, y_max, c1, c2
cursor = postgres_conn.cursor()
cursor.execute('DROP TABLE IF EXISTS movie_recommender')
cursor.execute('CREATE TABLE movie_recommender ('
' wikipedia_id TEXT PRIMARY KEY,'
' viewcount INT,'
' image TEXT,'
' x FLOAT,'
' y FLOAT,'
' vec FLOAT[] NOT NULL DEFAULT \'{}\''
')')
cursor.execute('CREATE INDEX movie_recommender_vec ON movie_recommender USING gin(vec)')
cursor.execute('CREATE INDEX movie_recommender_name_pattern ON movie_recommender USING btree(lower(wikipedia_id) text_pattern_ops)')
cursor.execute('CREATE INDEX movie_recommender_viewcount ON movie_recommender(viewcount)')
for movie, coo, weights in zip(movies, xy, src_weights):
x = int(sz * (coo[0] - x_min) / (x_max - x_min)) * w
y = int(sz * (coo[1] - y_min) / (y_max - y_min)) * h
v_len = np.linalg.norm(weights)
cursor.execute('INSERT INTO movie_recommender (wikipedia_id, image, viewcount, x, y, vec) '
'VALUES (%s, %s, %s, %s, %s, %s)',
(movie[0], movie[1], movie[-1], x, y,
[float(weight) / v_len for weight in weights]))
postgres_conn.commit()
cursor.close()
neighbors('Star Wars (film)')
coo = xy[639]
x = int(sz * (coo[0] - x_min) / (x_max - x_min)) * w
y = int(sz * (coo[1] - y_min) / (y_max - y_min)) * h
x, y
frames = []
size = 4800
i = 0
x1 = x + 75
y1 = y + 200
while size > 480:
width2 = int(size / 2)
height2 = int(size / 3)
img_crop = img.crop((x1 - width2, y1 - height2, x1 + width2, y1 + height2))
img_crop = img_crop.resize((600, 400))
fn = 'movie_images/frame_%d.png' % i
img_crop.save(fn)
frames.append(fn)
size /= 1.2
i += 1
len(frames)
imageio.mimsave('movie_recommend.gif', [imageio.imread(frame) for frame in frames], 'GIF', duration=0.5)
display(HTML('<img src="movie_recommend.gif">'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The fetch_image function below fetches an image from wikipedia given a wikipedia id for that image
Step2: The code here relies on wikipedia having been imported into Postgres using Wiki Import. So we setup a connection to postgres, fetch the 10000 most popular movies and then extract for each the movie name, the image for the movie and the outgoing links from that page
Step3: Now that we have all the movies, extract all the outgoing links (refs) and keep the ones that are used more than once.
Step4: Now createthe refs_movie structure that holds for each ref, the movies that refer to it
Step5: We now construct a generator that yields tupels that encode whether or not two movies link to the same entity. (The generator can also be used with yield_movie_pairs=False to get movie/entity pairs). +1 indicates co-occurance, -1 indicates non occurance
Step6: The model itself is fairly straightforward
Step7: It converges quite quickly too. The achieved loss isn't super impressive - 0.4 or so
Step8: Extract the weights for the movie embeddings
Step9: Test whether it works by looking for "star wars" related movies.
Step10: Another way to explore the data is by using TSNE to group the movies on a two dimensional grid.
Step11: Scatter Plot the first 300 movies to get an idea of whether what the shape is we are looking at
Step12: We can now create a large bitmap that has the movie posters of all movies on it which we can then use to interactively explore neighborhoods of movies. We just loop through all the movies in reverse order and plot them at the position where they are needed, making sure to
Step13: As a last step, we write the resulting data back into the database to power the interactive demo
|
14,206
|
<ASSISTANT_TASK:>
Python Code:
#import pandas and numpy libraries
import pandas as pd
import numpy as np
import sys #sys needed only for python version
#import gaussian naive bayes from scikit-learn
import sklearn as sk
#seaborn for pretty plots
import seaborn as sns
#display versions of python and packages
print('\npython version ' + sys.version)
print('pandas version ' + pd.__version__)
print('numpy version ' + np.__version__)
print('sk-learn version ' + sk.__version__)
print('seaborn version ' + sns.__version__)
#read in data. it's comma-separated with no column names.
df = pd.read_csv('agaricus-lepiota.data', sep=',', header=None,
error_bad_lines=False, warn_bad_lines=True, low_memory=False)
# set pandas to output all of the columns in output
pd.options.display.max_columns = 25
#show the first 5 rows
print(df.sample(n=5))
#manually add column names from documentation (1st col is class: e=edible,p=poisonous; rest are attributes)
df.columns = ['class','cap-shape','cap-surface','cap-color','bruises','odor','gill-attachment',
'gill-spacing','gill-size','gill-color','stalk-shape','stalk-root',
'stalk-surf-above-ring','stalk-surf-below-ring','stalk-color-above-ring','stalk-color-below-ring',
'veil-type','veil-color','ring-number','ring-type','spore-color','population','habitat']
print("Example values:\n")
print(df.iloc[3984]) #this one has a ? value - how are those treated by classifier?
#show plots in notebook
%matplotlib inline
#bar chart of classes using pandas plotting
print(df['class'].value_counts())
#df['class'].value_counts().plot(kind='bar')
#put the features into X (everything except the 0th column)
X = pd.DataFrame(df, columns=df.columns[1:len(df.columns)], index=df.index)
#put the class values (0th column) into Y
Y = df['class']
#encode the class labels as numeric
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(Y)
#print(le.classes_)
#print(np.array(Y))
#Y values now boolean values; poison = 1
y = le.transform(Y)
#print(y_train)
#have to initialize or get error below
x = pd.DataFrame(X,columns=[X.columns[0]])
#encode each feature column and add it to x_train (one hot encoder requires numeric input?)
for colname in X.columns:
le.fit(X[colname])
#print(colname, le.classes_)
x[colname] = le.transform(X[colname])
#encode the feature labels using one-hot encoding
from sklearn import preprocessing
oh = preprocessing.OneHotEncoder(categorical_features='all')
oh.fit(x)
xo = oh.transform(x).toarray()
#print(xo)
print('\nEncoder Value Counts Per Column:')
print(oh.n_values_)
print('\nExample Feature Values - row 1 in X:')
print(X.iloc[1])
print('\nExample Encoded Feature Values - row 1 in xo:')
print(xo[1])
print('\nClass Values (Y):')
print(np.array(Y))
print('\nEncoded Class Values (y):')
print(y)
#split the dataset into training and test sets
from sklearn.cross_validation import train_test_split
x_train, x_test, y_train, y_test = train_test_split(xo, y, test_size=0.33)
#initialize and fit the naive bayes classifier
from sklearn.naive_bayes import BernoulliNB
skbnb = BernoulliNB()
skbnb.fit(x_train,y_train)
train_predict = skbnb.predict(x_train)
#print(train_predict)
#see how accurate the training data was fit
from sklearn import metrics
print("Training accuracy:",metrics.accuracy_score(y_train, train_predict))
#use the trained model to predict the test values
test_predict = skbnb.predict(x_test)
print("Testing accuracy:",metrics.accuracy_score(y_test, test_predict))
print("\nClassification Report:")
print(metrics.classification_report(y_test, test_predict, target_names=['edible','poisonous']))
print("\nConfusion Matrix:")
skcm = metrics.confusion_matrix(y_test,test_predict)
#putting it into a dataframe so it prints the labels
skcm = pd.DataFrame(skcm, columns=['predicted-edible','predicted-poisonous'])
skcm['actual'] = ['edible','poisonous']
skcm = skcm.set_index('actual')
#NOTE: NEED TO MAKE SURE I'M INTERPRETING THE ROWS & COLS RIGHT TO ASSIGN THESE LABELS!
print(skcm)
print("\nScore (same thing as test accuracy?): ", skbnb.score(x_test,y_test))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The dataset doesn't include column names, and the values are text characters
Step2: Added column names from the UCI documentation
Step3: The dataset is split fairly evenly between the edible and poison classes
Step4: Let's see how well our classifier can identify poisonous mushrooms by combinations of features
|
14,207
|
<ASSISTANT_TASK:>
Python Code:
names_df = pd.read_csv("./IMA_mineral_names.txt", sep=',', header=None, names=['names'])
names_df['names'] = names_df['names'].str.strip().str.lower()
names_df['len'] = names_df['names'].str.len()
names_df['tuple'] = names_df['names'].apply(lambda x: tuple(sorted(set(x))))
names_df['setlen'] = names_df['tuple'].apply(lambda x: len(x))
names_df['set_per_len'] = names_df['setlen']/names_df['len']
names_df.head(5)
len(names_df)
def sort_and_return_smallest(df):
if len(df) == 1:
return df
df = df.sort_values(by=['len', 'names'])
return df.iloc[:1, :]
%time names_set = names_df.groupby(by='tuple', as_index=False).apply(sort_and_return_smallest)
len(names_set)
def sort_and_return_smallest_duplicates(df):
if len(df) == 1:
return list(df['names'])
df = df.sort_values(by=['len', 'names'])
names = df.loc[df['len'] == df['len'].iloc[0], 'names']
return list(names)
%time names_duplicates = names_df.groupby(by='tuple', as_index=False).apply(sort_and_return_smallest_duplicates)
len(names_duplicates)
# In case some of these are in the chosen set
duplicate_name_dict = {}
for value in names_duplicates:
if len(value) > 1:
duplicate_name_dict[value[0]] = value[1:]
names_set.set_index(['names'], inplace=True)
names_set.head()
letter_df = pd.DataFrame(index=names_set.index, columns=list(string.ascii_lowercase), dtype=bool)
letter_df.loc[:] = False
%%time
for name, set_ in zip(names_set.index, names_set['tuple']):
for letter in set_:
letter_df.loc[name, letter] = True
lowest_count_letter = letter_df.sum(0).argmin()
lowest_count_letter
# Get subset based on the chosen letter
subsetlen = letter_df[letter_df[lowest_count_letter]].sum(1)
name_len = subsetlen.index.str.len()
setlen = pd.DataFrame({'set_per_len' : subsetlen/name_len, 'len' : name_len})
setlen.head()
def get_min_set(df, current_items, m=46, sort_by_len=False, n_search=20):
# Gather results
results = []
# Get letter with lowest number of options
letter = df.sum(0)
letter = letter[letter > 0].argmin()
# Get subset based on the chosen letter
subsetlen = df.loc[df[letter], :].sum(1)
name_len = subsetlen.index.str.len()
setlen = pd.DataFrame({'set_per_len' : subsetlen/name_len, 'len' : name_len})
if sort_by_len:
order_of_operations = setlen.sort_values(by=['len', 'set_per_len'], ascending=True).index
else:
order_of_operations = setlen.sort_values(by=['set_per_len', 'len'], ascending=False).index
# Loop over the mineral names with chosen letter
# Ordered based on the (setlen / len)
for i, (name, letter_bool) in enumerate(df.loc[order_of_operations, :].iterrows()):
if i > n_search:
break
if sum(map(len, current_items))+len(name) >= m:
continue
# Get df containing rest of the letters
df_ = df.copy()
df_.loc[:, letter_bool] = False
# If letters are exhausted there is one result
# Check if the result is less than chosen limit m
if df_.sum(0).sum() == 0 and sum(map(len, current_items))+len(name) < m:
# This result is "the most optimal" under these names
current_items_ = current_items + [name]
len_current_items_ = sum(map(len, current_items_))
len_unique = len(set("".join(current_items_)))
results.append((len_current_items_, current_items_))
if len_current_items_ < 41:
print("len", len_current_items_, "len_unique", len_unique, current_items_, "place 1", flush=True)
continue
# Remove mineral names without new letters
df_ = df_.loc[df_.sum(1) != 0, :]
if df_.sum(0).sum() == 0:
if sum(map(len, current_items))+len(name) < m:
unique_letters = sum(map(len, map(set, current_items + [name])))
if unique_letters == len(string.ascii_lowercase):
# Here is one result (?)
current_items_ = current_items + [name]
len_current_items_ = sum(map(len, current_items_))
len_unique = len(set("".join(current_items_)))
results.append((len_current_items_, current_items_))
if len_current_items_ < 41:
print("len", len_current_items_, "len_unique", len_unique, current_items_, "place 1", flush=True)
continue
current_items_ = current_items + [name]
optimal_result = get_min_set(df_, current_items_, m=m, sort_by_len=sort_by_len, n_search=n_search)
if len(optimal_result):
results.extend(optimal_result)
return results
%%time
res_list = []
order_of_oparations = setlen.loc[letter_df.loc[:, lowest_count_letter], :].sort_values(by=['set_per_len', 'len'], ascending=False).index
for i, (name, letter_bool) in enumerate(letter_df.ix[order_of_oparations].iterrows()):
print(name, i+1, "/", len(order_of_oparations), flush=True)
df_ = letter_df.copy()
df_.loc[:, letter_bool] = False
res = get_min_set(df_, [name], m=45, sort_by_len=False, n_search=20)
res_list.extend(res)
res_df = pd.DataFrame([[item[0]] + item[1] for item in res_list]).sort_values(by=0)
res_df.head()
%%time
res_list_ = []
order_of_oparations = setlen.loc[letter_df.loc[:, lowest_count_letter], :].sort_values(by=['set_per_len', 'len'], ascending=False).index
for i, (name, letter_bool) in enumerate(letter_df.ix[order_of_oparations].iterrows()):
print(name, i+1, "/", len(order_of_oparations), flush=True)
df_ = letter_df.copy()
df_.loc[:, letter_bool] = False
res_ = get_min_set(df_, [name], m=45, sort_by_len=True, n_search=20)
res_list_.extend(res_)
#res_df_ = pd.DataFrame([[item[0]] + item[1] for item in res_list_]).sort_values(by=0)
res_df.shape #, res_df_.shape
%time res_df.to_csv("./example_but_not_optimum_no_duplicates.csv")
optimum = res_df[res_df[0] == res_df.iloc[0, 0]]
optimum.iloc[:, 1:].applymap(lambda x: duplicate_name_dict.get(x, None))
optimum
optimum.apply(lambda x: "".join(sorted(set("".join(x.iloc[1:6].values)))) == string.ascii_lowercase, axis=1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Remove duplicates
Step2: Create letter table
Step3: Find argmin in the letter distribution
Step4: Recursion
Step5: The effective ratio criteria
Step6: The shortest name length criteria
Step7: Save the results
Step8: Check for duplicates
Step9: Validate results
|
14,208
|
<ASSISTANT_TASK:>
Python Code:
!pip install tensorflow-gpu
%env CUDA_DEVICE_ORDER=PCI_BUS_ID
%env CUDA_VISIBLE_DEVICES=0,1
import os
print(os.environ["CUDA_DEVICE_ORDER"])
print(os.environ["CUDA_VISIBLE_DEVICES"])
import tensorflow as tf
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
tf.test.gpu_device_name()
!nvcc --version
!pip install mxnet-cu90
# From https://stackoverflow.com/questions/49076092/is-there-a-way-to-check-if-mxnet-uses-my-gpu/49079940#49079940
# https://developer.download.nvidia.com/compute/DCGM/docs/nvidia-smi-367.38.pdf
!"C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi" --query-gpu=timestamp,name,pci.bus_id,driver_version,pstate,pcie.link.gen.max,pcie.link.gen.current,temperature.gpu,utilization.gpu,utilization.memory,memory.total,memory.free,memory.used --format=csv
import mxnet as mx
def gpu_device(gpu_number=0):
try:
_ = mx.nd.array([1, 2, 3], ctx=mx.gpu(gpu_number))
except mx.MXNetError:
return None
return mx.gpu(gpu_number)
gpu_device()
mx.gpu(0)
from __future__ import print_function
import numpy as np
import mxnet as mx
from mxnet import nd, autograd, gluon
data_ctx = mx.cpu()
model_ctx = mx.cpu()
# model_ctx = mx.gpu(0)
batch_size = 64
num_inputs = 784
num_outputs = 10
num_examples = 60000
def transform(data, label):
return data.astype(np.float32)/255, label.astype(np.float32)
train_data = mx.gluon.data.DataLoader(mx.gluon.data.vision.MNIST(train=True, transform=transform),
batch_size, shuffle=True)
test_data = mx.gluon.data.DataLoader(mx.gluon.data.vision.MNIST(train=False, transform=transform),
batch_size, shuffle=False)
class MLP(gluon.Block):
def __init__(self, **kwargs):
super(MLP, self).__init__(**kwargs)
with self.name_scope():
self.dense0 = gluon.nn.Dense(64)
self.dense1 = gluon.nn.Dense(64)
self.dense2 = gluon.nn.Dense(10)
def forward(self, x):
x = nd.relu(self.dense0(x))
x = nd.relu(self.dense1(x))
x = self.dense2(x)
return x
net = MLP()
net.collect_params().initialize(mx.init.Normal(sigma=.01), ctx=model_ctx)
data = nd.ones((1,784))
net(data.as_in_context(model_ctx))
class MLP(gluon.Block):
def __init__(self, **kwargs):
super(MLP, self).__init__(**kwargs)
with self.name_scope():
self.dense0 = gluon.nn.Dense(64, activation="relu")
self.dense1 = gluon.nn.Dense(64, activation="relu")
self.dense2 = gluon.nn.Dense(10)
def forward(self, x):
x = self.dense0(x)
print("Hidden Representation 1: %s" % x)
x = self.dense1(x)
print("Hidden Representation 2: %s" % x)
x = self.dense2(x)
print("Network output: %s" % x)
return x
net = MLP()
net.collect_params().initialize(mx.init.Normal(sigma=.01), ctx=model_ctx)
net(data.as_in_context(model_ctx))
num_hidden = 64
net = gluon.nn.HybridSequential()
with net.name_scope():
net.add(gluon.nn.Dense(num_hidden, activation="relu"))
net.add(gluon.nn.Dense(num_hidden, activation="relu"))
net.add(gluon.nn.Dense(num_outputs))
net.hybridize()
net.collect_params().initialize(mx.init.Normal(sigma=.1), ctx=model_ctx)
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': .01})
def evaluate_accuracy(data_iterator, net):
acc = mx.metric.Accuracy()
for i, (data, label) in enumerate(data_iterator):
data = data.as_in_context(model_ctx).reshape((-1, 784))
label = label.as_in_context(model_ctx)
output = net(data)
predictions = nd.argmax(output, axis=1)
acc.update(preds=predictions, labels=label)
return acc.get()[1]
!pip install mxboard
!pip install tensorboard
from mxboard import SummaryWriter
sw = SummaryWriter(logdir='logs', flush_secs=5)
epochs = 10
smoothing_constant = .01
# collect parameter names for logging the gradients of parameters in each epoch
params = net.collect_params()
param_names = params.keys()
global_step = 0
for e in range(epochs):
cumulative_loss = 0
for i, (data, label) in enumerate(train_data):
data = data.as_in_context(model_ctx).reshape((-1, 784))
label = label.as_in_context(model_ctx)
with autograd.record():
output = net(data)
loss = softmax_cross_entropy(output, label)
sw.add_scalar(tag='cross_entropy', value=loss.mean().asscalar(), global_step=global_step)
if i == 0:
sw.add_image('minist_first_minibatch', data.reshape((batch_size, 1, 28, 28)), e)
if e == 0:
sw.add_graph(net)
grads = [i.grad() for i in net.collect_params().values()]
for i, name in enumerate(param_names):
sw.add_histogram(tag=name, values=grads[i], global_step=e, bins=1000)
global_step += 1
loss.backward()
trainer.step(data.shape[0])
cumulative_loss += nd.sum(loss).asscalar()
test_accuracy = evaluate_accuracy(test_data, net)
train_accuracy = evaluate_accuracy(train_data, net)
sw.add_scalar(tag='accuracy_curves', value=('train_acc', train_accuracy), global_step=e)
sw.add_scalar(tag='accuracy_curves', value=('valid_acc', test_accuracy), global_step=e)
print("Epoch %s. Loss: %s, Train_acc %s, Test_acc %s" %
(e, cumulative_loss/num_examples, train_accuracy, test_accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From https
Step2: From https
|
14,209
|
<ASSISTANT_TASK:>
Python Code:
for st_type, ways in abq_st_types.iteritems():
for name in ways:
better_name = update_name(name, mapping)
if name != better_name:
print name, "=>", better_name
Honolulu:
Kalakaua Ave => Kalakaua Avenue
Lusitania St. => Lusitania Street
...
Albuquerque:
Valley View Dr NW => Valley View Drive Northwest
Eubank Northeast Ste E-18 => Eubank Northeast Suite E-18
...
def map_aggregate(db, collection, pipeline):
db_collection = db[collection]
result = db_collection.aggregate(pipeline)
return result
# This function will be reused in further MongoDB explorations.
# Only the pipeline will need to be updated.
def make_city_pipeline(city):
pipeline = [{"$match":{"address.postcode":{"$exists":1},
"city_name":city}},
{"$group": {"_id": {"City":"$city_name",
"Zip":"$address.postcode"},
"count": {"$sum": 1}}},
{"$project": {'_id':0,
"City":"$_id.City",
"Zipcode":"$_id.Zip",
"Count":"$count"}},
{"$sort": {"Count": -1}},
{"$limit" : 10 }]
return pipeline
pipeline = make_city_pipeline('honolulu')
result1 = map_aggregate(db, 'cities', pipeline)
pipeline = make_city_pipeline('albuquerque')
result2 = map_aggregate(db, 'cities', pipeline)
u'result': [{u'City': u'honolulu', u'Count': 219, u'Zipcode': 96815
...}
u'result': [{u'City': u'albuquerque', u'Count': 476, u'Zipcode': 87123
...}]}
albuquerque_new-mexico.osm - 53.8 MB
albuquerque_new-mexico.osm.json - 65.7 MBm
honolulu_hawaii.osm - 43.8 MB
honolulu_hawaii.osm.json - 54.4 MB
print "Both Cities:", db.cities.find().count()
print "Honolulu:", db.cities.find({'city_name':'honolulu'}).count()
print "Albuquerque:", db.cities.find({'city_name':'albuquerque'}).count()
Both Cities: 489274
Honolulu: 227752
Albuquerque: 261522
print "Both Cities:", db.cities.find({'type':'node'}).count()
print "Honolulu:", db.cities.find({'type':'node', 'city_name':'honolulu'}).count()
print "Albuquerque:", db.cities.find({'type':'node','city_name':'albuquerque'}).count()
Both Cities: 432587
Honolulu: 206262
Albuquerque: 226325
print "Both Cities:", db.cities.find({'type':'way'}).count()
print "Honolulu:", db.cities.find({'type':'way', 'city_name':'honolulu'}).count()
print "Albuquerque:", db.cities.find({'type':'way', 'city_name':'albuquerque'}).count()
Both Cities: 56648
Honolulu: 21459
Albuquerque: 35189
print "Contributors:", len(db.cities.distinct("created.user"))
Contributors: 611
pipeline = [{"$match":{"amenity":{"$exists":1}, "city_name":city}},
{"$group": {"_id": {"City":"$city_name", "Amenity":"$amenity"},
"count": {"$sum": 1}}},
{"$project": {'_id':0,"City":"$_id.City",
"Amenity":"$_id.Amenity", "Count":"$count"}},
{"$sort": {"Count": -1}},
{"$limit" : 5 }]
u'result': [{u'Amenity': u'parking', u'City': u'honolulu', u'Count': 280},
{u'Amenity': u'restaurant', u'City': u'honolulu', u'Count': 123},..}
u'result': [{u'Amenity': u'parking', u'City': u'albuquerque',u'Count': 1270},
{u'Amenity': u'school', u'City': u'albuquerque', u'Count': 258},..}
pipeline = [{"$match":{"created.user":{"$exists":1},
"city_name":city}},
{"$group": {"_id": {"City":"$city_name", "User":"$created.user"},
"count": {"$sum": 1}}},
{"$project": {'_id':0, "City":"$_id.City",
"User":"$_id.User", "Count":"$count"}},
{"$sort": {"Count": -1}},
{"$limit" : 5 }]
u'result': [{u'City': u'honolulu', u'Count': 98401, u'User': u'Tom_Holland'},
{u'City': u'honolulu', u'Count': 13051, u'User': u'ikiya'},...}
u'result': [{u'City': u'albuquerque', u'Count': 88041, u'User': u'EdHillsman'},
{u'City': u'albuquerque', u'Count': 37604, u'User': u'anjbe'},...}
pipeline = [{"$match":{"amenity":{"$exists":1},
"amenity":"place_of_worship", "city_name":city}},
{"$group":{"_id": {"City":"$city_name", "Religion":"$religion"},
"count":{"$sum":1}}},
{"$project":{"_id":0, "City":"$_id.City",
"Religion":"$_id.Religion", "Count":"$count"}},
{"$sort":{"Count":-1}},
{"$limit":6}]
u'result': [{u'City': u'honolulu', u'Count': 14, u'Religion': u'christian'},
{u'City': u'honolulu', u'Count': 5, u'Religion': u'buddhist'},..}
u'result': [{u'City': u'albuquerque', u'Count': 186,u'Religion': u'christian'},
{u'City': u'albuquerque', u'Count': 36, u'Religion': 'NoNameGiven'},..}
pipeline = [{"$match":{"amenity":{"$exists":1},
"amenity":"restaurant", "city_name":city}},
{"$group":{"_id":{"City":"$city_name","Food":"$cuisine"},
"count":{"$sum":1}}},
{"$project":{"_id":0, "City":"$_id.City",
"Food":"$_id.Food", "Count":"$count"}},
{"$sort":{"Count":-1}},
{"$limit":6}]
u'result': [{u'City': u'honolulu', u'Count': 7, u'Food': u'pizza'},
{u'City': u'honolulu', u'Count': 3, u'Food': u'regional'},
{u'City': u'honolulu', u'Count': 3, u'Food': u'japanese'}...}
u'result': [{u'City': u'albuquerque', u'Count': 16, u'Food': u'mexican'},
{u'City': u'albuquerque', u'Count': 10, u'Food': u'pizza'},
{u'City': u'albuquerque', u'Count': 6, u'Food': u'american'}...}
pipeline = [{"$match":{"amenity":{"$exists":1},
"amenity":"fast_food", "city_name":city}},
{"$group":{"_id":{"City":"$city_name", "Food":"$cuisine"},
"count":{"$sum":1}}},
{"$project":{"_id":0, "City":"$_id.City",
"Food":"$_id.Food",
"Count":"$count"}},
{"$sort":{"Count":-1}},
{"$limit":6}]
u'result': [{u'City': u'honolulu', u'Count': 13, u'Food': u'burger'},
{u'City': u'honolulu', u'Count': 2, u'Food': u'sandwich'},
{u'City': u'honolulu', u'Count': 2, u'Food': u'sushi'},...}
{u'ok': 1.0,
u'result': [{u'City': u'albuquerque', u'Count': 31, u'Food': u'burger'},
{u'City': u'albuquerque', u'Count': 16, u'Food': u'sandwich'},
{u'City': u'albuquerque', u'Count': 6, u'Food': u'pizza'},...}
pipeline = [{"$match":{"amenity":{"$exists":1},
"amenity":"fast_food","city_name":city}},
{"$group":{"_id":{"City":"$city_name",
"Name":"$name"},
"count":{"$sum":1}}},
{"$project":{"_id":0,
"City":"$_id.City",
"Name":"$_id.Name",
"Count":"$count"}},
{"$sort":{"Count":-1}},
{"$limit":6}]
u'result': [{u'City': u'honolulu', u'Count': 8, u'Name': u"McDonald's"},
{u'City': u'honolulu', u'Count': 4, u'Name': u'Subway'},
{u'City': u'honolulu', u'Count': 3, u'Name': u'Burger King'},..}
u'result': [{u'City': u'albuquerque', u'Count': 23, u'Name': u'Subway'},
{u'City': u'albuquerque', u'Count': 12, u'Name': u"Blake's Lotaburger"},
{u'City': u'albuquerque', u'Count': 11, u'Name': u"McDonald's"},...}
pipeline = [{"$match":{"shop":{"$exists":1}, "city_name":city}},
{"$group":{"_id":{"City":"$city_name", "Shop":"$shop"},
"count":{"$sum":1}}},
{"$project": {'_id':0, "City":"$_id.City",
"Shop":"$_id.Shop", "Count":"$count"}},
{"$sort":{"Count":-1}},
{"$limit":10}]
u'result': [{u'City': u'honolulu', u'Count': 50, u'Shop': u'supermarket'},
{u'City': u'honolulu', u'Count': 24, u'Shop': u'convenience'},
{u'City': u'honolulu', u'Count': 18, u'Shop': u'clothes'},...}
u'result': [{u'City': u'albuquerque', u'Count': 66, u'Shop': u'convenience'},
{u'City': u'albuquerque', u'Count': 46, u'Shop': u'supermarket'},
{u'City': u'albuquerque', u'Count': 22, u'Shop': u'car_repair'},...}
pipeline = [{"$match":{"shop":{"$exists":1},
"city_name":city, "shop":"convenience"}},
{"$group":{"_id":{"City":"$city_name", "Name":"$name"},
"count":{"$sum":1}}},
{"$project": {'_id':0, "City":"$_id.City",
"Name":"$_id.Name", "Count":"$count"}},
{"$sort":{"Count":-1}},
{"$limit":5}]
u'result': [{u'City': u'honolulu', u'Count': 7, u'Name': u'ABC Store'},...}
u'result': [{u'City': u'albuquerque', u'Count': 14, u'Name': u'Circle K'}...}
from IPython import utils
from IPython.core.display import HTML
import os
def css_styling():
Load default custom.css file from ipython profile
base = utils.path.get_ipython_dir()
styles = "<style>\n%s\n</style>" % (open(os.path.join(base,'profile_custom1/static/custom/custom.css'),'r').read())
return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <hr>
Step2: <hr>
Step3: Number of documents
Step4: Number of node nodes.
Step5: Number of way nodes.
Step6: Total Number of contributors.
Step7: <hr>
Step8: <hr>
Step9: <hr>
Step10: <hr>
Step11: <hr>
Step12: <hr>
Step13: <hr>
Step14: <hr>
Step16: <div align="center">Back to top</div>
|
14,210
|
<ASSISTANT_TASK:>
Python Code:
#%matplotlib notebook
# imports
from importlib import reload
import numpy as np
import os
from pkg_resources import resource_filename
from matplotlib import pyplot as plt
from scipy import interpolate
from astropy import units
from astropy.table import Table
from astropy.cosmology import Planck15
from frb.dm import igm
reload(igm)
DM = igm.average_DM(1.)
DM
DM_cumul, zeval = igm.average_DM(1., cumul=True)
# Inoue approximation
DM_approx = 1000. * zeval * units.pc / units.cm**3
plt.clf()
ax = plt.gca()
ax.plot(zeval, DM_cumul, label='JXP')
ax.plot(zeval, DM_approx, label='Approx')
# Label
ax.set_xlabel('z')
ax.set_ylabel(r'${\rm DM}_{\rm IGM} [\rm pc / cm^3]$ ')
# Legend
legend = plt.legend(loc='lower right', scatterpoints=1, borderpad=0.2,
handletextpad=0.1, fontsize='large')
plt.show()
plt.clf()
ax = plt.gca()
ax.plot(zeval, DM_approx/DM_cumul, label='Approx/JXP')
#ax.plot(zeval, DM_approx, label='Approx')
# Label
ax.set_xlabel('z')
ax.set_ylabel(r'Ratio of ${\rm DM}_{\rm IGM} [\rm pc / cm^3]$ ')
# Legend
legend = plt.legend(loc='upper right', scatterpoints=1, borderpad=0.2,
handletextpad=0.1, fontsize='large')
plt.show()
DM_cumul[0:10]
DM_approx[0:10]
zeval[0]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: DM -- Piece by piece (as coded)
Step2: Cumulative plot
|
14,211
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import ipyvolume as ipv
# Build a 128^3 scalar field containing two nested hollow cubes, then render
# it with ipyvolume's volumetric renderer.
V = np.zeros((128,128,128)) # our 3d array
# outer box: shell of value 0.75 (fill a cube, then hollow out its interior)
V[30:-30,30:-30,30:-30] = 0.75
V[35:-35,35:-35,35:-35] = 0.0
# inner box: shell of value 0.25, nested inside the outer shell
V[50:-50,50:-50,50:-50] = 0.25
V[55:-55,55:-55,55:-55] = 0.0
ipv.figure()
# Two isosurface levels pick out the two shells; low opacity keeps both visible.
ipv.volshow(V, level=[0.25, 0.75], opacity=0.03, level_width=0.1, data_min=0, data_max=1)
ipv.view(-30, 40)  # camera azimuth/elevation in degrees
ipv.show()
import ipyvolume as ipv
fig = ipv.figure()
# Example dataset: a scan of a male head, downsampled to at most 128 per axis.
vol_head = ipv.examples.head(max_shape=128);
vol_head.ray_steps = 400  # more ray-marching steps -> finer volume rendering
ipv.view(90, 0)
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualizating a scan of a male head
|
14,212
|
<ASSISTANT_TASK:>
Python Code:
%pylab
%matplotlib inline
%run jupyter_helpers
%run yc_framework
figure_width = 16
eval_date = create_date('2017-01-03')
def generate_pricing_curvemap(eval_date):
    """Build a synthetic pricing CurveMap of USD/GBP LIBOR and OIS curves.

    All curves are generated from a short-rate model on a shared 80-year
    time grid; the RNG is seeded so the map is reproducible.
    """
    random.seed(0)
    grid = linspace(eval_date + 0, eval_date + 365 * 80, 7)

    def base_curve(name, r0, speed, mean, sigma):
        # Stand-alone curve from the short-rate model on the shared grid.
        return CurveConstructor.FromShortRateModel(
            name, grid, r0, speed, mean, sigma,
            interpolation=InterpolationMode.CUBIC_LOGDF)

    def spread_curve(parent, name, r0, speed, mean, sigma):
        # Curve expressed as a stochastic spread over an existing curve.
        curve = base_curve(name, r0, speed, mean, sigma)
        curve.add_another_curve(parent)
        return curve

    u3m = base_curve('USD.LIBOR.3M', 0.02, 0.03, 0.035, 5e-4)
    u6m = spread_curve(u3m, 'USD.LIBOR.6M', 0.01, 0.03, 0.011, 5e-4)
    u12m = spread_curve(u6m, 'USD.LIBOR.12M', 0.01, 0.03, 0.011, 5e-4)
    g3m = spread_curve(u3m, 'GBP.LIBOR.3M', 0.03, 0.03, 0.0, 5e-4)
    u1b = base_curve('USD/USD.OIS', 0.01, 0.03, 0.011, 5e-4)
    g1b = spread_curve(u1b, 'GBP/GBP.SONIA', 0.005, 0.03, 0.005, 5e-4)
    gu1b = spread_curve(u1b, 'GBP/USD.OIS', 0.001, 0.03, 0.001, 5e-4)

    pricing_curvemap = CurveMap()
    # Same insertion order as the original notebook cell.
    for curve in (u3m, u6m, u12m, g3m, g1b, u1b, gu1b):
        pricing_curvemap.add_curve(curve)
    return pricing_curvemap
pricing_curvemap = generate_pricing_curvemap(eval_date)
# Display:
figsize(figure_width, 6)
linestyle('solid'), pricing_curvemap.plot(), title('Pricing Curvemap'), legend(), show();
cloned_curve = deepcopy(pricing_curvemap['USD.LIBOR.3M'])
figsize(figure_width, 5), linestyle('solid'), title('Curve Interpolation Modes')
for i, interpolation in enumerate(InterpolationMode._member_map_.values()):
cloned_curve.set_interpolator(interpolation)
cloned_curve.plot(label=interpolation), legend()
curve_builder = CurveBuilder('engine_usd_gbp.xlsx', eval_date)
price_ladder = curve_builder.reprice(pricing_curvemap)
# Display:
figsize(figure_width, 4)
price_ladder.sublist('USD.LIBOR.3M').dataframe()
figsize(figure_width, 6)
m, r = curve_builder.get_instrument_rates(price_ladder.sublist('USD.LIBOR.3M'))
m = [exceldate_to_pydate(int(i)) for i in m]
title('USD.LIBOR.3M instrument par-rates')
linestyle(' '), plt.plot(m,r,marker='.', label='USD.LIBOR.3M instrument par-rates')
linestyle('-'), pricing_curvemap['USD.LIBOR.3M'].plot()
legend();
build_output = curve_builder.build_curves(price_ladder)
# Display:
figsize(figure_width, 6)
title('Curvebuilder output')
linestyle('solid'), build_output.output_curvemap.plot(), legend()
linestyle('dotted'), pricing_curvemap.plot();
jacobian_dPdI = np.linalg.pinv(build_output.jacobian_dIdP)
# Display:
figsize(figure_width, 8)
title("Jacobian Matrix"), xlabel('Pillars'), ylabel('Instruments')
imshow(jacobian_dPdI), colorbar();
risk_calculator = RiskCalculator(curve_builder, build_output)
def visualise_bump(instrument_search_string, bumpsize):
    """Bump the par-rate of matching instrument(s) and plot the curve effect.

    Args:
        instrument_search_string: search pattern passed to
            ``RiskCalculator.find_instruments`` (e.g. 'USD.LIBOR.3M.*').
        bumpsize: absolute bump applied to the instruments' par rates
            (e.g. 1e-4 for one basis point).

    Built curves are drawn solid; bumped curves dashed.
    """
    # The original line redundantly re-assigned bumpsize to itself in a tuple.
    instruments = risk_calculator.find_instruments(instrument_search_string)
    curvemap_bump = risk_calculator.get_bumped_curvemap(
        instruments, bumpsize, BumpType.JACOBIAN_REBUILD)
    # Display:
    figsize(figure_width, 6)
    linestyle('solid'), build_output.output_curvemap.plot(), legend()
    linestyle('dashed'), curvemap_bump.plot()
    title("Effect of bumping instrument %s" % instrument_search_string)
visualise_bump('USD.LIBOR.3M__Swap__20Y', 1e-4)
visualise_bump('USD.LIBOR.3M.*', 15e-4)
visualise_bump('USD.LIBOR.6M__BasisSwap__20Y', 1e-4)
visualise_bump('USD.LIBOR.6M.*', 15e-4)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Pricing Curve Map
Step2: Interpolation Modes
Step3: Curve Builder
Step4: Instrument Repricing
Step5: Display price ladder for a specific curve
Step6: Display instrument par-rates
Step7: Curve Building
Step8: Below is the comparison of curves which we have just built (solid lines) with pricing curves (dotted lines). These lines should be as close to each other as possible.
Step9: Instrument/Pillar Jacobian Matrix
Step10: Risk Calculator
Step11: Let's define a convenience function which will bump par-rate of a specific instrument by the given amount of basis points and visualise the effect on all curves.
Step12: Bumping Market Instruments
Step13: Bumping Basis Instruments
|
14,213
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import sys
sys.path.insert(0, "../..")
from insights.combiners.httpd_conf import get_tree
from insights.parsr.query import *
conf = get_tree()
conf["Alias"]
conf["Directory"]
conf["Directory"]["Options"]
conf["Directory", "/"]
conf["Directory", "/", "/var/www"]
conf["Directory", startswith("/var/www")]
conf[contains("Icon")]
conf[contains("Icon"), contains("zip")]
conf[contains("Icon"), any_(contains("CMP")) & any_(contains("zip"))]
conf[contains("Icon"), all_(~startswith("/"))]
conf.find(startswith("Directory"))
query = startswith("Directory")
print("Directives:")
print(conf.find(query).directives)
print()
print("Sections:")
print(conf.find(query).sections)
print()
print("Chained filtering:")
print(conf.find(query).sections["Options"])
print("Top level Sections starting with 'Dir':")
print(conf[startswith("Dir")].sections)
print()
print("Children starting with 'Dir' of any top level Section:")
print(conf.sections[startswith("Dir")])
conf.doc.Directory.Require
conf.doc.Directory.where("Require", "denied")
from insights.parsr.query import make_child_query as q
conf.doc.Directory.where(q("Require", "denied") | q("AllowOverride", "FileInfo"))
res = conf.doc.Directory.where(q("Require", "denied") | q("AllowOverride", "FileInfo"))
res.Options
conf.doc.Directory.where(lambda d: "denied" in d.Require.value or "FileInfo" in d.AllowOverride.value)
res = conf["Blah"]
print("Boolean:", bool(res))
print("Length:", len(res))
print()
print("Iteration:")
for c in conf["Directory"]:
print(c.value)
print()
print("Indexing:")
print(conf["Directory"][0].value)
print(conf["Directory"][first].value)
print(conf["Directory"][-1].value)
print(conf["Directory"][last].value)
sorted(set(c.name for c in conf))
root = conf.find("ServerRoot")[0]
print("Node name:", root.name)
print("Value:", root.value) # gets the value if the entry has only one. raises and exception if it has more than 1
print("Values:", conf.find("Options").values) # same as above except collects values of current results.
print()
print("Unique Values:", conf.find("Options").unique_values) # same as above except values are unique.
print()
print("Attributes:", root.attrs) # an entry may have multiple values
print("Children:", len(root.children)) #
print("Parent:", conf.find("Options")[0].parent.name)
print("Parents:", conf.find("Options").parents.values) # go up one level from the results.
print("Root:", "conf.find('LogFormat')[0].root # Omitted due to size") # root of current entry.
print("Roots:", "conf.find('LogFormat').roots # Omitted due to size") # all roots of current results.
print("File: ", root.file_path) # path of the backing file. Not always available.
print("Original Line:", root.line) # raw line from the original source. Not always available.
print("Line Number:", root.lineno) # line number in source of the element. Not always available.
port = conf.find("Listen").value
print(port)
print(type(port))
conf["Directory"]["Options"].values
conf.find("ServerRoot")
conf.find("Alias")
conf.find("LogFormat")
print(conf.find("Alias")[0])
print(conf.find("Alias")[-1])
r = conf.find("Boom")
print(type(r))
print(r)
print('conf.find("LogFormat"):')
print(conf.find("LogFormat"))
print()
print('conf.find("LogFormat").parents:')
print(conf.find("LogFormat").parents)
conf.find(("IfModule", "logio_module"), "LogFormat")
conf.find("IfModule", ("LogFormat", "combinedio"))
print(conf.select("Alias"))
print()
print(conf.select("LogFormat") or "Nothing")
print(conf.select("LogFormat", deep=True))
print(conf.select("LogFormat", deep=True, roots=False))
print()
print(conf.select("LogFormat", deep=True, roots=False)[0])
print(conf.select("LogFormat", deep=True, roots=False)[-1])
conf.find(("Options", "Indexes")).upto("Directory")
print(conf.find("LogFormat"))
print()
print(conf.find("LogFormat").get_crumbs())
print()
print(conf.doc.IfModule.IfModule.LogFormat)
print()
print(conf.doc.IfModule.LogFormat)
from insights.parsr.query.boolean import pred, pred2
is_ifmod = pred(lambda x: x == "IfModule")
is_user_mod = pred(lambda x: "user" in x)
divisible_by = pred2(lambda in_val, divisor: (in_val % divisor) == 0)
print("Num IfModules:", len(conf[is_ifmod]))
print("User mod checks:", len(conf.find(("IfModule", is_user_mod))))
print("Div by 10?", conf["Listen", divisible_by(10)] or "No matches")
print("Div by 3?", conf["Listen", divisible_by(3)] or "No matches")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: conf now contains the consolidated httpd configuration tree from my machine. The API that follows is exactly the same for nginx, multipath, logrotate, and ini parsers. Yaml and Json parsers have a .doc attribute that exposes the same API. They couldn't do so directly for backward compatibility reasons.
Step2: Notice that the first pair of brackets are a query against the first level of the configuration tree. conf["Alias"] returns all of the "Alias" nodes. conf["Directory"] returns all of the "Directory" nodes.
Step3: The comma constructs a tuple, so conf["Directory", "/"] and conf[("Directory", "/")] are equivalent. The first element of the tuple exactly matches the node name, and subsequent elements exactly match any of the node's attributes. Notice that this is still a query, and the result behaves like a list
Step4: That's asking for Directory nodes with any attribute exactly matching any of "/" or "/var/www". These can be chained with more brackets just like the simpler queries shown earlier.
Step5: Predicates can be combined with boolean logic. Here are all the top level nodes with "Icon" in the name and attributes that contain "CMP" and "zip". Note the helper any_ (there's also an all_) that means any attribute must succeed.
Step6: Here are the entries with all attributes not starting with "/"
Step7: Several predicates are provided
Step8: Notice that conf[startswith("Dir")].sections is not the same as conf.sections[startswith("Dir")]. The first finds all the top level nodes that start with "Dir" and then filters those to just the sections. The second gets all of the top level sections and then searches their children for nodes starting with "Dir."
Step9: Ignoring Case
Step10: Query by Children
Step11: The second is by using the make_child_query helper that lets you combine multiple "top level" queries that include name and value queries.
Step12: Note you can continue the traversal after a where
Step13: The name and value queries inside of q can contain all of the predicates we've seen before, and q instances can be combined with & and | and negated with ~.
Step14: Truth and Iteration
Step15: This is also true of conf itself
Step16: Attributes
Step17: There's also a .values property that will accumulate all of the attributes of multiple children that match a query. Multiple attributes from a single child are converted to a single string.
Step18: Useful functions
Step19: If you want the first or last match, access them with brackets as you would a list
Step20: Find takes an addition parameter, roots, which defaults to False. If it is False, the matching entries are returned. If set to True, the unique set of ancestors of all matching results are returned.
Step21: select
Step22: upto
Step23: get_crumbs
Step24: Custom Predicates
|
14,214
|
<ASSISTANT_TASK:>
Python Code:
# Dictionary basics demo: market caps keyed by ticker symbol.
mktcaps = {'AAPL':538.7,'GOOG':68.7,'IONS':4.6}  # initialise the dictionary
print(type(mktcaps))
print(mktcaps)
print(mktcaps.values())
print(mktcaps.keys())
print(mktcaps.items())
# In Python 3, items() returns a view object, so materialise it before
# indexing (the original used the Python-2 statement `print c[0]`).
c = list(mktcaps.items())
print(c[0])
mktcaps['AAPL']  # returns the value bound to key 'AAPL'
try:
    mktcaps['GS']  # KeyError: 'GS' is not in mktcaps (intentional demo)
except KeyError:
    pass  # in the notebook this cell raised; keep the script runnable
mktcaps.get('GS')  # returns None because 'GS' is not in mktcaps
mktcaps['GS'] = 88.65  # insert 'GS' into the dictionary
print(mktcaps)
del(mktcaps['GOOG'])  # remove 'GOOG' from mktcaps
print(mktcaps)
mktcaps.keys()  # all keys
mktcaps.values()  # all values
import hashlib

# Hashing demo: map a label to the SHA-256 digest object of its bytes.
l = ('AAA', 'BBB', 'CCC', 'DDD', 'EEE')
print(l)
print(len(l))
# sha256 requires bytes in Python 3; the original passed the mistyped
# str 'AAA)' which raises TypeError (and hashed the wrong text anyway).
hshdict = {'AAA': hashlib.sha256(b'AAA')}
hshdict.values()
v = hshdict['AAA']
m = v.hexdigest()
print(m)
alter = {'Peter':45,'Julia':23,'Mathias':36} #Erzeugen eines Dictionaries
print(alter)
alter['Julia']=27 #Ändern des Alters
alter['Monika']=33 #Hinzufügen von Monika - die Reihenfolge der Schlüssel spielt keine Rolle
print(alter)
if 'Monika' in alter:
print (alter['Monika'])
temperatur={'stuttgart':32.9,'muenchen':29.8,'hamburg':24.4}# Erzeugen eines dictionaries mit Temperaturen in verschiedenen Städten
temperatur['koeln']=29.7 #hinzufuegen der temperatur in koeln
print(temperatur) #ausgabe der temperaturen
for stadt in temperatur:
print('Die Temperatur in %s ist %g °C' % (stadt,temperatur[stadt]))
if 'Berlin' in temperatur:
print ('Berlin:', temperatur['Berlin'])
else:
print ('Keine Daten für Berlin gefunden')
'stuttgart' in temperatur #überprüfen ob Schlüssel in temperatur enthalten ist
temperatur.keys() #Ausgabe der Schlüssel im Dictionary
temperatur.values()#ausgabe der Werte im Dictionary
for stadt in sorted(temperatur):
print(stadt)
temperatur_kopie=temperatur.copy() #erstellt eine KOpie des dictonaries
print (temperatur_kopie)
temperatur2={'stuttgart':22.9,'muenchen':23.8,'hamburg':21.4} #ein 2-tes dictionary
temperatur.update(temperatur2)
for stadt in temperatur:
print('Die Temperatur in %s ist %g °C' % (stadt,temperatur[stadt]))
print('Anzahl enthaltene Staedte: %g'% len(temperatur))
temperatur2={'stuttgart':22.9,'muenchen':23.8,'hamburg':21.4,'koeln':18.6,'frankfurt':20.6, 'weimar':18.8} #ein 2-tes dictionary
temperatur.update(temperatur2)
for stadt in temperatur:
print('Die Temperatur in %s ist %g °C' % (stadt,temperatur[stadt]))
print('Anzahl enthaltene Staedte: %g'% len(temperatur))
st={}#Erzeugen des leeren dictionarys
st['100100'] = {'Mathe':1.0, 'Bwl':2.5}
st['100200'] = {'Mathe':2.3, 'Bwl':1.8}
print(st.items())
print(type(st))
print(st.values())
print(st.keys())
def stud_verz():
    """Interactively build a student directory.

    Repeatedly prompts for a matriculation number; an empty entry ends the
    loop. For each student the maths and business grades are queried and
    stored as a nested dict keyed by the matriculation number.
    """
    verzeichnis = {}
    while True:
        matrikel = input('Matrikel-Nr als string eingeben:')
        if not matrikel:
            break
        note_mathe = input('Mathe Note eingeben:')
        note_bwl = input('Bwl Note eingeben:')
        verzeichnis[matrikel] = {"Mathematik": note_mathe, "BWL": note_bwl}
    return verzeichnis
d1={'hans':1.8,'peter':1.73,'rainer':1.74}
d2={'petra':1.8,'hannes':1.73,'rainer':1.78}
d1.update(d2)
print(d1)
deutsch = {'key':['Schluessel','Taste'],'slice':['Scheibe','Schnitte','Stueck'],'value':['Wert']}
print(deutsch)
######Abfangen von Abfragefehlern
def uebersetze(wort, d):
    """Look up *wort* (word) in the translation dictionary *d*.

    Returns the value stored under ``wort`` (a list of translations), or
    the string ``'unbekannt'`` ("unknown") when the word is missing.
    """
    # dict.get avoids the double lookup of the explicit membership test.
    return d.get(wort, 'unbekannt')
print(uebersetze('slice',deutsch))
uebersetze('search',deutsch)
#Vokabeltrainer entwickeln
import random
#Definition der Funktionen
def dict_laden(pfad):
    """Load a vocabulary dictionary from the text file at *pfad*.

    Each line is split on whitespace; the first token becomes the key and
    the remaining tokens the list of translations. Best-effort: on any I/O
    or parse error the entries read so far are returned (an unreadable file
    yields an empty dict), matching the original's bare-except behaviour.
    """
    d = {}
    try:
        # Context manager guarantees the file is closed (the original
        # could leak the handle when readlines() raised).
        with open(pfad) as datei:
            for eintrag in datei:
                l_eintrag = eintrag.split()
                d[l_eintrag[0]] = l_eintrag[1:]
    except Exception:
        pass
    return d
    # Unreachable statements that followed this return in the original
    # (leftovers of a commented-out `aufgabe(d)` helper) were removed.
#Datei liegt auf dem Pfad
#c:\\Benutzer\\ramon\\Dokumente\\Python Scripts\\python-edx-07-07-17\\woerterbuch.txt'
#woerterbuch liste von einträgen mit leerzeichen getrennt
d={}
datei=open('woerterbuch.txt')
liste = datei.readlines()
print(liste)
for eintrag in liste:
l_eintrag = eintrag.split()#trennung an leerzeichen
#print(l_eintrag[0])
#print(l_eintrag[1])
d[l_eintrag[0]]=l_eintrag[1:]
datei.close()
print(d)
zufall = random.randint(0, len(d.keys())-1)
vokabel = list(d.keys())[zufall]
print(vokabel+' ?')
antwort=input()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h3>Beispiel
Step2: <h3>Beispiel
Step3: <h2>Beispiel Studenten - mit dictionary</h2>
Step4: <h2>Schrittweiser Aufbau eines Studentenverezichnisses</h2>
Step5: <h2>Ein Dictionary aus anderen zusammensetzen
Step6: <h2>Datenzugriff in einem dictionary
Step7: <h1>Vokabeltrainer entwickeln
|
14,215
|
<ASSISTANT_TASK:>
Python Code:
try:
import cirq
except ImportError:
print("installing cirq...")
!pip install --quiet cirq --pre
print("installed cirq.")
from typing import Iterable, List, Optional, Sequence
import matplotlib.pyplot as plt
import numpy as np
import os
import cirq
import cirq_google as cg # Contains the Floquet calibration tools.
# The Google Cloud Project id to use.
project_id = '' #@param {type:"string"}
processor_id = "" #@param {type:"string"}
from cirq_google.engine.qcs_notebook import get_qcs_objects_for_notebook
device_sampler = get_qcs_objects_for_notebook(project_id, processor_id)
line_length = 20 if device_sampler.is_simulator else 35
line = cg.line_on_device(device_sampler.device, line_length)
print(line)
segment_length = 5
segments = [line[i: i + segment_length]
for i in range(0, line_length - segment_length + 1, segment_length)]
print(*segments[0])
sqrt_iswap = cirq.ISWAP ** 0.5
def create_linear_chain_segment(
    segment: Sequence[cirq.Qid],
    num_trotter_steps: int,
) -> cirq.Circuit:
    """Returns a linear chain circuit on one segment.

    The middle qubit of the segment is excited to |1>, then each Trotter
    step applies one staggered layer of sqrt(iSWAP) gates: even-indexed
    pairs on even steps, odd-indexed pairs on odd steps.

    Note: the docstring's triple quotes were missing in the original,
    leaving a bare prose line that is a syntax error.
    """
    circuit = cirq.Circuit(cirq.X.on(segment[len(segment) // 2]))

    # Trotter steps: alternate the pairing offset so layers are staggered.
    for step in range(num_trotter_steps):
        offset = step % 2
        circuit += cirq.Moment(
            [sqrt_iswap.on(a, b) for a, b in zip(segment[offset::2],
                                                 segment[offset + 1::2])])
    return circuit
def create_linear_chain_circuit(
    segments: Sequence[Sequence[cirq.Qid]],
    num_trotter_steps: int,
) -> cirq.Circuit:
    """Returns a linear chain circuit to demonstrate Floquet calibration on.

    Builds one linear-chain circuit per segment, zips them so all segments
    run in parallel, and appends a terminal Z-basis measurement of every
    qubit under the key 'z'. (The original's docstring was missing its
    triple quotes, leaving a bare prose line — a syntax error.)
    """
    circuit_segments = [create_linear_chain_segment(segment, num_trotter_steps)
                        for segment in segments]
    circuit = cirq.Circuit.zip(*circuit_segments)
    return circuit + cirq.measure(*sum(segments, ()), key='z')
# Example of the linear chain circuit on one segment of the line.
# (The bare prose lines in this cell were stripped cell captions that made
# the script a syntax error; they are now comments.)
num_trotter_steps = 20
circuit_on_segment = create_linear_chain_circuit(
    segments=[segments[0]],
    num_trotter_steps=num_trotter_steps,
)
print(circuit_on_segment.to_text_diagram(qubit_order=segments[0]))

# Circuit used to demonstrate Floquet calibration.
circuit = create_linear_chain_circuit(
    segments=segments,
    num_trotter_steps=num_trotter_steps
)

# Simulate one segment on a simulator.
nreps = 20_000
sim_result = cirq.Simulator().run(circuit_on_segment, repetitions=nreps)

# Execute the full circuit on a processor without Floquet calibration.
raw_results = device_sampler.sampler.run(circuit, repetitions=nreps)
def z_density_from_measurements(
    measurements: np.ndarray,
    post_select_filling: Optional[int] = 1
) -> np.ndarray:
    """Returns density for one segment on the line.

    Args:
        measurements: (repetitions, qubits) array of 0/1 Z-basis outcomes.
        post_select_filling: when not None, keep only shots whose total
            excitation count equals this value; other shots are discarded
            as errors.

    Returns:
        Per-qubit average excitation over the post-selected shots. When
        ``post_select_filling`` is None, the per-shot totals are averaged
        instead (a scalar), preserving the original behaviour.
    """
    totals = np.sum(measurements, axis=1, dtype=int)

    if post_select_filling is not None:
        keep = np.abs(totals - post_select_filling) == 0
        return np.average(measurements[keep], axis=0)
    return np.average(totals, axis=0)
def z_densities_from_result(
    result: cirq.Result,
    segments: Iterable[Sequence[cirq.Qid]],
    post_select_filling: Optional[int] = 1
) -> List[np.ndarray]:
    """Returns densities for each segment on the line.

    Slices the flat measurement array (key 'z') into consecutive per-segment
    column blocks and computes each segment's post-selected density. (The
    original's docstring was missing its triple quotes — a syntax error.)
    """
    measurements = result.measurements['z']
    z_densities = []

    offset = 0
    for segment in segments:
        z_densities.append(z_density_from_measurements(
            measurements[:, offset: offset + len(segment)],
            post_select_filling)
        )
        offset += len(segment)
    return z_densities
#@title
def plot_density(
    ax: plt.Axes,
    sim_density: np.ndarray,
    raw_density: np.ndarray,
    cal_density: Optional[np.ndarray] = None,
    raw_errors: Optional[np.ndarray] = None,
    cal_errors: Optional[np.ndarray] = None,
    title: Optional[str] = None,
    show_legend: bool = True,
    show_ylabel: bool = True,
) -> None:
    """Plots the density of a single segment for simulated, raw, and
    calibrated results.

    Optional error bars are drawn for raw/calibrated series; x-axis is the
    qubit index within the segment. (The original's docstring was missing
    its triple quotes — a syntax error.)
    """
    colors = ["grey", "orange", "green"]
    alphas = [0.5, 0.8, 0.8]
    labels = ["sim", "raw", "cal"]

    # Plot densities.
    for i, density in enumerate([sim_density, raw_density, cal_density]):
        if density is not None:
            ax.plot(
                range(len(density)),
                density,
                "-o" if i == 0 else "o",  # only the simulated curve is connected
                markersize=11,
                color=colors[i],
                alpha=alphas[i],
                label=labels[i]
            )

    # Plot errors if provided.
    errors = [raw_errors, cal_errors]
    densities = [raw_density, cal_density]
    for i, (errs, dens) in enumerate(zip(errors, densities)):
        if errs is not None:
            ax.errorbar(
                range(len(errs)),
                dens,
                errs,
                linestyle='',
                color=colors[i + 1],
                capsize=8,
                elinewidth=2,
                markeredgewidth=2
            )

    # Titles, axes, and legend.
    ax.set_xticks(list(range(len(sim_density))))
    ax.set_xlabel("Qubit index in segment")

    if show_ylabel:
        ax.set_ylabel("Density")
    if title:
        ax.set_title(title)
    if show_legend:
        ax.legend()
def plot_densities(
    sim_density: np.ndarray,
    raw_densities: Sequence[np.ndarray],
    cal_densities: Optional[Sequence[np.ndarray]] = None,
    rows: int = 3
) -> None:
    """Plots densities for simulated, raw, and calibrated results on all
    segments, in a grid of subplots with one shared legend.

    (The original's docstring was missing its triple quotes — a syntax
    error.)
    """
    if not cal_densities:
        cal_densities = [None] * len(raw_densities)

    cols = (len(raw_densities) + rows - 1) // rows  # ceiling division
    fig, axes = plt.subplots(
        rows, cols, figsize=(cols * 4, rows * 3.5), sharey=True
    )
    # Normalise `axes` to a flat list regardless of the grid shape.
    if rows == 1 and cols == 1:
        axes = [axes]
    elif rows > 1 and cols > 1:
        axes = [axes[row, col] for row in range(rows) for col in range(cols)]

    for i, (ax, raw, cal) in enumerate(zip(axes, raw_densities, cal_densities)):
        plot_density(
            ax,
            sim_density,
            raw,
            cal,
            title=f"Segment {i + 1}",
            show_legend=False,
            show_ylabel=i % cols == 0  # y-label only on the first column
        )

    # Common legend for all subplots.
    handles, labels = ax.get_legend_handles_labels()
    fig.legend(handles, labels)

    plt.tight_layout(pad=0.1, w_pad=1.0, h_pad=3.0)
# Extract densities from measurement results.
# (Bare stripped cell captions in this cell were syntax errors; now comments.)

# Simulator density.
# NOTE(review): the circuit object stands in for the qubit segment here; the
# column slice in z_densities_from_result clips at the array width, so this
# still yields the full segment density — passing segments[0] would be clearer.
sim_density, = z_densities_from_result(sim_result, [circuit_on_segment])

# Processor densities without Floquet calibration.
raw_densities = z_densities_from_result(raw_results, segments)
plot_densities(sim_density, raw_densities, rows=int(np.sqrt(line_length / segment_length)))

# Plot mean density and variance over segments.
raw_avg = np.average(raw_densities, axis=0)
raw_std = np.std(raw_densities, axis=0, ddof=1)

plot_density(
    plt.gca(),
    sim_density,
    raw_density=raw_avg,
    raw_errors=raw_std,
    title="Average over segments"
)

# One-shot alternative to the three-step calibration below:
# (calibrated_circuit, calibrations
# ) = cg.run_zeta_chi_gamma_compensation_for_moments(
#     circuit,
#     device_sampler.sampler,
# )
# Step 1: Find moments in the circuit that need to be characterized.
# (Bare "Step N" caption lines in this cell were syntax errors; now comments.)
(characterized_circuit, characterization_requests
) = cg.prepare_characterization_for_moments(
    circuit,
    options=cg.FloquetPhasedFSimCalibrationOptions(
        characterize_theta=False,
        characterize_zeta=True,
        characterize_chi=False,
        characterize_gamma=True,
        characterize_phi=False
    )
)

# Show an example characterization request.
print(f"Total {len(characterization_requests)} moment(s) to characterize.")

print("\nExample request")
request = characterization_requests[0]
print("Gate:", request.gate)
print("Qubit pairs:", request.pairs)
print("Options: ", request.options)

# Step 2: Characterize moments on the engine.
characterizations = cg.run_calibrations(
    characterization_requests,
    device_sampler.sampler,
    max_layers_per_request=1,
)

print(f"Total: {len(characterizations)} characterizations.")
print()
(pair, parameters), *_ = characterizations[0].parameters.items()
print(f"Example pair: {pair}")
print(f"Example parameters: {parameters}")

# Step 3: Apply corrections to the circuit to get a calibrated circuit.
calibrated_circuit = cg.make_zeta_chi_gamma_compensation_for_moments(
    characterized_circuit,
    characterizations
)

print("Portion of calibrated circuit:")
print("\n".join(
    calibrated_circuit.circuit.to_text_diagram(qubit_order=line).splitlines()[:9] +
    ["..."]))
# Run the calibrated circuit on the engine.
# (Bare stripped cell captions in this cell were syntax errors; now comments.)
cal_results = device_sampler.sampler.run(calibrated_circuit.circuit, repetitions=nreps)

# Extract densities from measurement results.
cal_densities = z_densities_from_result(cal_results, segments)
plot_densities(
    sim_density, raw_densities, cal_densities, rows=int(np.sqrt(line_length / segment_length))
)

# Plot mean density and variance over segments.
raw_avg = np.average(raw_densities, axis=0)
raw_std = np.std(raw_densities, axis=0, ddof=1)
cal_avg = np.average(cal_densities, axis=0)
cal_std = np.std(cal_densities, axis=0, ddof=1)

plot_density(
    plt.gca(),
    sim_density,
    raw_avg,
    cal_avg,
    raw_std,
    cal_std,
    title="Average over segments"
)

# Plot errors of raw vs calibrated results.
fig, axes = plt.subplots(ncols=2, figsize=(15, 4))
axes[0].set_title("Error of the mean")
axes[0].set_ylabel("Density")
axes[1].set_title("Data standard deviation")

colors = ["orange", "green"]
labels = ["raw", "cal"]
for index, density in enumerate([raw_densities, cal_densities]):
    color = colors[index]
    label = labels[index]
    average_density = np.average(density, axis=0)
    sites = list(range(len(average_density)))
    error = np.abs(average_density - sim_density)
    std_dev = np.std(density, axis=0, ddof=1)
    axes[0].plot(sites, error, color=color, alpha=0.6)
    axes[0].scatter(sites, error, color=color)
    axes[1].plot(sites, std_dev, label=label, color=color, alpha=0.6)
    axes[1].scatter(sites, std_dev, color=color)

for ax in axes:
    ax.set_xticks(sites)
    ax.set_xlabel("Qubit index in segment")
plt.legend();
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: Defining the circuit
Step3: This line is now broken up into a number of segments of a specified length (number of qubits).
Step4: For example, the first segment consists of the following qubits.
Step7: We now implement a number of Trotter steps on each segment in parallel. The middle qubit on each segment is put into the $|1\rangle$ state, then each Trotter step consists of staggered $\sqrt{\text{iSWAP}}$ gates. All qubits are measured in the $Z$ basis at the end of the circuit.
Step9: As an example, we show this circuit on the first segment of the line from above.
Step11: The circuit we will use for Floquet calibration is this same pattern repeated on all segments of the line.
Step13: Execution on a simulator
Step15: Execution on the processor without Floquet calibration
Step18: Comparing raw results to simulator results
Step21: Now we define functions to plot the densities for the simulator, processor without Floquet calibration, and processor with Floquet calibration (which we will use at the end of this notebook). The first function is for a single segment, and the second function is for all segments.
Step23: Visualizing results
Step24: We first plot the densities on each segment. Note that the simulator densities ("sim") are repeated on each segment and the lines connecting them are just visual guides.
Step26: We can also look at the average and variance over the segments.
Step27: In the next section, we will use Floquet calibration to produce better average results. After running the circuit with Floquet calibration, we will use these same visualizations to compare results.
Step29: The returned calibrated_circuit.circuit can then be run on the engine. The full list of returned arguments is as follows
Step31: The characterization_requests contain information on the operations (gate + qubit pairs) to characterize.
Step33: We now characterize them on the engine using cirq_google.run_calibrations.
Step34: The characterizations store characterization results for each pair in each moment, for example.
Step36: Finally, we apply corrections to the original circuit.
Step37: The calibrated circuit can now be run on the processor. We first inspect the calibrated circuit to compare to the original.
Step39: Note again that $\sqrt{\text{iSWAP}}$ gates are padded by $Z$ phases to compensate for errors. We now run this calibrated circuit.
Step41: Comparing raw results to calibrated results
Step42: Now we reproduce the same density plots from above on each segment, this time including the calibrated ("cal") results.
Step44: We also visualize the mean and variance of results over segments as before.
Step46: Last, we can look at density errors between raw/calibrated results and simulated results.
|
14,216
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install tflite-model-maker
import numpy as np
import os
import tensorflow as tf
assert tf.__version__.startswith('2')
from tflite_model_maker import configs
from tflite_model_maker import model_spec
from tflite_model_maker import text_classifier
from tflite_model_maker import TextClassifierDataLoader
data_dir = tf.keras.utils.get_file(
fname='SST-2.zip',
origin='https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8',
extract=True)
data_dir = os.path.join(os.path.dirname(data_dir), 'SST-2')
spec = model_spec.get('mobilebert_classifier')
train_data = TextClassifierDataLoader.from_csv(
filename=os.path.join(os.path.join(data_dir, 'train.tsv')),
text_column='sentence',
label_column='label',
model_spec=spec,
delimiter='\t',
is_training=True)
test_data = TextClassifierDataLoader.from_csv(
filename=os.path.join(os.path.join(data_dir, 'dev.tsv')),
text_column='sentence',
label_column='label',
model_spec=spec,
delimiter='\t',
is_training=False)
model = text_classifier.create(train_data, model_spec=spec)
loss, acc = model.evaluate(test_data)
config = configs.QuantizationConfig.create_dynamic_range_quantization(optimizations=[tf.lite.Optimize.OPTIMIZE_FOR_LATENCY])
config._experimental_new_quantizer = True
model.export(export_dir='mobilebert/', quantization_config=config)
spec = model_spec.get('average_word_vec')
data_dir = tf.keras.utils.get_file(
fname='SST-2.zip',
origin='https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8',
extract=True)
data_dir = os.path.join(os.path.dirname(data_dir), 'SST-2')
train_data = TextClassifierDataLoader.from_csv(
filename=os.path.join(os.path.join(data_dir, 'train.tsv')),
text_column='sentence',
label_column='label',
model_spec=spec,
delimiter='\t',
is_training=True)
test_data = TextClassifierDataLoader.from_csv(
filename=os.path.join(os.path.join(data_dir, 'dev.tsv')),
text_column='sentence',
label_column='label',
model_spec=spec,
delimiter='\t',
is_training=False)
model = text_classifier.create(train_data, model_spec=spec, epochs=10)
model.summary()
loss, acc = model.evaluate(test_data)
model.export(export_dir='average_word_vec/')
model.evaluate_tflite('average_word_vec/model.tflite', test_data)
new_model_spec = model_spec.AverageWordVecModelSpec(wordvec_dim=32)
new_train_data = TextClassifierDataLoader.from_csv(
filename=os.path.join(os.path.join(data_dir, 'train.tsv')),
text_column='sentence',
label_column='label',
model_spec=new_model_spec,
delimiter='\t',
is_training=True)
model = text_classifier.create(new_train_data, model_spec=new_model_spec)
new_model_spec = model_spec.get('mobilebert_classifier')
new_model_spec.seq_len = 256
model = text_classifier.create(train_data, model_spec=spec, epochs=20)
loss, accuracy = model.evaluate(test_data)
spec = model_spec.get('bert_classifier')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Text classification with TensorFlow Lite Model Maker
Step2: Import the required packages.
Step3: Get the data path
Step4: You can also upload your own dataset to work through this tutorial. Upload your dataset by using the left sidebar in Colab.
Step5: Step 2. Load train and test data specific to an on-device ML app and preprocess the data according to a specific model_spec.
Step6: Step 3. Customize the TensorFlow model.
Step7: Step 4. Evaluate the model.
Step8: Step 5. Export as a TensorFlow Lite model.
Step9: You can also download the model using the left sidebar in Colab.
Step10: Load Input Data Specific to an On-device ML App
Step11: The SST-2 dataset has train.tsv for training and dev.tsv for validation. The files have the following format
Step12: The Model Maker library also supports the from_folder() method to load data. It assumes that the text data of the same class are in the same subdirectory and that the subfolder name is the class name. Each text file contains one movie review sample. The class_labels parameter is used to specify which the subfolders.
Step13: Examine the detailed model structure.
Step14: Evaluate the Customized Model
Step15: Export as a TensorFlow Lite Model
Step16: The TensorFlow Lite model file can be used in the text classification reference app by adding model.tflite to the assets directory. Do not forget to also change the filenames in the code.
Step17: Advanced Usage
Step18: Get the preprocessed data.
Step19: Train the new model.
Step20: You can also adjust the MobileBERT model.
Step21: Tune the training hyperparameters
Step22: Evaluate the newly retrained model with 20 training epochs.
Step23: Change the Model Architecture
|
14,217
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
import sys
# Modify the path
sys.path.append("..")
import pandas as pd
import yellowbrick as yb
import matplotlib.pyplot as plt
from yellowbrick.classifier import ROCAUC
from sklearn.model_selection import train_test_split
occupancy = pd.read_csv('data/occupancy/occupancy.csv')
features = [
"temperature", "relative humidity", "light", "C02", "humidity"
]
classes = ["unoccupied", "occupied"]
X = occupancy[features]
y = occupancy['occupancy']
# Create the train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
classifiers = [
AdaBoostClassifier(),
MLPClassifier(),
DecisionTreeClassifier(),
QuadraticDiscriminantAnalysis(),
DecisionTreeClassifier(),
RandomForestClassifier(),
]
for classifier in classifiers:
oz = ROCAUC(classifier)
oz.fit(X_train, y_train)
oz.score(X_test, y_test)
g = oz.show()
from sklearn.svm import LinearSVC, NuSVC, SVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import RidgeClassifier, RidgeClassifierCV
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
classifiers = [
BernoulliNB(),
MultinomialNB(),
LogisticRegression(),
LogisticRegressionCV()
]
for classifier in classifiers:
oz = ROCAUC(classifier)
oz.fit(X_train, y_train)
oz.score(X_test, y_test)
g = oz.show()
oz = ROCAUC(LinearSVC())
oz.fit(X_train, y_train)
oz.score(X_test, y_test)
oz.show()
oz = ROCAUC(SVC())
oz.fit(X_train, y_train)
oz.score(X_test, y_test)
oz.show()
oz = ROCAUC(SGDClassifier())
oz.fit(X_train, y_train)
oz.score(X_test, y_test)
oz.show()
oz = ROCAUC(PassiveAggressiveClassifier())
oz.fit(X_train, y_train)
oz.score(X_test, y_test)
oz.show()
oz = ROCAUC(RidgeClassifier())
oz.fit(X_train, y_train)
oz.score(X_test, y_test)
oz.show()
oz = ROCAUC(RidgeClassifierCV())
oz.fit(X_train, y_train)
oz.score(X_test, y_test)
oz.show()
attrs = (
'predict_proba',
'decision_function',
)
failing_classifiers = [
LinearSVC(),
SVC(),
SGDClassifier(),
PassiveAggressiveClassifier(),
RidgeClassifier(),
RidgeClassifierCV()
]
def profile(classifiers):
    """Fit each classifier and print the shape and content of its score output.

    For every classifier, fits on the module-level ``X_train``/``y_train``,
    then resolves ``predict_proba`` or ``decision_function`` (from the
    module-level ``attrs`` tuple) and calls it on ``X_test``.

    NOTE(review): despite the comment below, the inner loop does not break
    after the first resolved attribute, so for estimators exposing both
    methods the printed ``y_pred`` comes from the *last* attribute tried
    (``decision_function``) — confirm that is intended.
    NOTE(review): if neither attribute resolves, ``y_pred`` is unbound (or
    stale from the previous classifier) when printed — TODO confirm every
    classifier passed here exposes at least one of the two methods.
    """
    for classifier in classifiers:
        classifier.fit(X_train, y_train)
        # Return the first resolved function
        for attr in attrs:
            try:
                method = getattr(classifier, attr, None)
                if method:
                    y_pred = method(X_test)
            except AttributeError:
                continue
        print("y_pred shape for {} is {}.".format(
            classifier.__class__.__name__, y_pred.shape)
        )
        print(y_pred)
profile(failing_classifiers)
working_classifiers_decision = [
AdaBoostClassifier(),
QuadraticDiscriminantAnalysis(),
LogisticRegression(),
LogisticRegressionCV()
]
profile(working_classifiers_decision)
working_classifiers_proba = [
MLPClassifier(),
DecisionTreeClassifier(),
RandomForestClassifier(),
BernoulliNB(),
MultinomialNB()
]
profile(working_classifiers_proba)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Binary Classification with 1D Coefficients or Feature Importances
Step2: Looks good; everything works!
Step3: Some of these generate the IndexError
Step4: so what's going on here?
Step5: Classifiers that Currently Work with Binary Classification & ROCAUC
Step6: Sklearn Documentation
|
14,218
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.random.seed(3908544)
# Generate two random datasets.
data1 = np.random.normal(loc = 0, scale = 58, size = 1000)
data2 = 200 * np.random.random(1000) - 100
# What are their means and variances?
print("Dataset 1 :: {:.2f} (avg) :: {:.2f} (std)".format(data1.mean(), data1.std()))
print("Dataset 2 :: {:.2f} (avg) :: {:.2f} (std)".format(data2.mean(), data2.std()))
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure().set_figwidth(12)
plt.subplot(121)
plt.title("Dataset 1")
_ = plt.hist(data1, bins = 20, range = (-100, 100))
plt.subplot(122)
plt.title("Dataset 2")
_ = plt.hist(data2, bins = 20, range = (-100, 100))
np.random.seed(8493248)
X = np.random.normal(size = 1000)
Y1 = (X + np.random.normal(size = 1000) / 2)
Y2 = (-X + np.random.normal(size = 1000) / 2)
plt.figure().set_figwidth(12)
plt.subplot(121)
plt.title("Dataset Y1")
_ = plt.hist(Y1, bins = 50, range = (-4, 4))
plt.subplot(122)
plt.title("Dataset Y2")
_ = plt.hist(Y2, bins = 50, range = (-4, 4))
plt.scatter(X, Y1, marker = ".", color = "black", label = "Dataset 1")
plt.scatter(X, Y2, marker = ".", color = "gray", label = "Dataset 2")
plt.xlabel("X")
plt.ylabel("Y")
plt.legend(loc = 0)
plt.title("Joint Distribution")
print(np.corrcoef(X, Y1)[0, 1])
print(np.corrcoef(X, Y2)[0, 1])
personA = np.array([63, 150]) # 63 inches, 150 pounds
personB = np.array([67, 160]) # 67 inches, 160 pounds
personC = np.array([70, 171]) # 70 inches, 171 pounds
plt.scatter(personA[0], personA[1])
plt.scatter(personB[0], personB[1])
plt.scatter(personC[0], personC[1])
import numpy.linalg as nla
print("A to B: {:.2f}".format( nla.norm(personA - personB) ))
print("A to C: {:.2f}".format( nla.norm(personA - personC) ))
print("B to C: {:.2f}".format( nla.norm(personB - personC) ))
personA = np.array([160.0, 150]) # 160 cm, 150 pounds
personB = np.array([170.2, 160]) # 170.2 cm, 160 pounds
personC = np.array([177.8, 171]) # 177.8 cm, 171 pounds
plt.scatter(personA[0], personA[1])
plt.scatter(personB[0], personB[1])
plt.scatter(personC[0], personC[1])
print("A to B: {:.2f}".format( nla.norm(personA - personB) ))
print("A to C: {:.2f}".format( nla.norm(personA - personC) ))
print("B to C: {:.2f}".format( nla.norm(personB - personC) ))
def rescale(data):
    """Standardize each column of ``data`` to zero mean and unit variance.

    Works on a float copy, so the caller's array is left untouched (the
    previous version subtracted/divided in place, mutating the argument and
    raising a casting error for integer arrays).

    Parameters
    ----------
    data : array_like, shape (n_samples, n_features)
        Data whose columns are to be standardized.

    Returns
    -------
    numpy.ndarray
        Column-standardized float copy of ``data``. A zero-variance column
        still divides by zero and yields NaN/inf, as before.
    """
    out = np.array(data, dtype=float)  # explicit float copy
    # First: subtract off the mean of each column.
    out -= out.mean(axis=0)
    # Second: divide by the standard deviation of each column.
    out /= out.std(axis=0)
    return out
np.random.seed(3248)
X = np.random.random((5, 3)) # Five rows with three dimensions.
print("=== BEFORE ===")
print("Means: {}\nStds: {}".format(X.mean(axis = 0), X.std(axis = 0)))
Xs = rescale(X)
print("=== AFTER ===")
print("Means: {}\nStds: {}".format(Xs.mean(axis = 0), Xs.std(axis = 0)))
import pandas as pd # "pd" is the import convention, like "np" is for NumPy
data = pd.Series([0.25, 0.5, 0.75, 1])
print(data)
data = pd.Series({2:'a', 1:'b', 3:'c'})
print(data)
# Standard Python dictionary, nothing new and exciting.
population_dict = {'California': 38332521,
'Texas': 26448193,
'New York': 19651127,
'Florida': 19552860,
'Illinois': 12882135}
population = pd.Series(population_dict) # Oh right: you can feed dicts to Series!
area_dict = {'California': 423967,
'Texas': 695662,
'New York': 141297,
'Florida': 170312,
'Illinois': 149995}
area = pd.Series(area_dict)
# Build the DataFrame!
states = pd.DataFrame({'population': population,
'area': area})
print(states)
print(states.index) # Our row names
print(states.columns) # Our Series / column names
print(states['population'])
print(states.iloc[:, 1])
x = np.array([0, 1, None, 2])
print(x.sum())
x = np.array([0, 1, np.nan, 2])
print(x.sum())
data = pd.Series([1, np.nan, 'hello', None])
print(data.isnull()) # Where are the null indices?
print()
print(data[data.notnull()]) # Use the boolean mask to pull out non-null indices.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Both datasets contain 1000 random numbers. Both datasets have very nearly the same mean and same standard deviation.
Step2: Behold
Step3: If you plotted Y1 and Y2 using the histograms from the previous strategy, you'd get two datasets that looked pretty much identical.
Step4: Maybe slightly different shapes, but qualitatively (and statistically) identical.
Step5: TOTES DIFFERENT, again!
Step6: "Correlation" means as we change one variable (X), another variable changes by a similar amount (Y). Positive correlation means as we increase one variable, the other increases; negative correlation means as we increase one variable, the other decreases.
Step7: And you compute the "distance" between each point (we'll just use standard Euclidean distance)
Step8: As you can see, the two closest data points are person A and person B.
Step9: Using this data, we arrive at the conclusion that persons B and C are most similar! Oops...?
Step10: Of course, like anything (everything?), there are still caveats.
Step11: Think of a Series as a super-fancy 1D NumPy array. It's so fancy, in fact, that you can give a Series completely custom indices, sort of like a dictionary.
Step12: If a Series is essentially a fancy 1D NumPy array, then a DataFrame is a fancy 2D array. Here's an example.
Step13: DataFrames are really nice--you can directly access all the extra information they contain.
Step14: You can also directly access the property you're interested in, rather than having to memorize the index number as with NumPy arrays
Step15: But you can also access the same information almost as you would with a NumPy array
Step16: Note the use of the .iloc attribute of DataFrames.
Step17: Welp, that crashed and burned. What about using NaN instead?
Step18: Well, it didn't crash. But since "NaN" specifically stands for "Not A Number", it makes arithmetic difficult since any operation involving a NaN will return NaN.
|
14,219
|
<ASSISTANT_TASK:>
Python Code:
a_df = pd.DataFrame([
{
"Name": "A 회사 직원 (1)",
"Age": 30,
},
{
"Name": "A 회사 직원 (2)",
"Age": 29,
}
])
b_df = pd.DataFrame([
{
"Name": "B 회사 직원 (1)",
"Age": 33,
},
{
"Name": "B 회사 직원 (2)",
"Age": 26,
}
])
a_df
a_df + b_df # Matrix Sum 과 유사하게 합쳐진다. (행렬합처럼)
together_df = pd.DataFrame(columns=["Name", "Age"])
# for문을 돌리면서 together_df.loc[len(together_Df)] ...
a_df.append(b_df)
a_df
ab_df = a_df.append(b_df) #이렇게 하면 inplace로 바꿀 수 있다.
ab_df.loc[0] #2개가 나와 버린다. 인덱스가 중복되었기 때문
# 1. 번외로 index가 중복 되었을 때 해결하는 방법!
ab_df.reset_index()
a_df["Age"] #Series => 특정 Column 하나만 가져오는 애
a_df[["Age"]] #리스트를 받으면. DataFrame => 특정 Columns들만 가져오는 애
ab_df.reset_index()[["Name", "Age"]] #이렇게 해도 되지만 번거롭다.
ab_df.reset_index(drop=True)
ab_df.reset_index(drop=True, inplace=True) #이거 명령어 추가하면 원본 데이터가 변경된다.
pd.concat([a_df, b_df])
ab_df = pd.concat([a_df, b_df]).reset_index(drop=True)
ab_df
a_df["Company"] = "KaKao"
b_df["Company"] = "Daum"
ab_df = pd.concat([a_df, b_df])
ab_df
# 카카오에 다녔던 친구를 뽑고 싶습니다.
# 칼럼의 값을 비교해서 True/False series를 만들고 원본 데이터프레임에서 시리즈의 값이 True인 친구만 뽑는다.(행렬곱)
is_kakao = ab_df["Company"] == "KaKao"
is_kakao
ab_df[is_kakao] #지금은 Series로 들어갔다.
a_df = a_df[["Name", "Age"]]
b_df = b_df[["Name", "Age"]]
b_df
ab_df = pd.concat([a_df, b_df], keys=["KaKao", "Daum"])
ab_df.loc["KaKao"] # 결과가 DataFrame 으로 나왔다.
a_df.loc[0] #결과가 Series로 나온다. 원본 데이터 형태에 따라서 결과가 다르게 나온다.
ab_df.loc["KaKao"].loc[0]
name_email_df = pd.DataFrame([
{
"Name": "김기표",
"Email": "kkp@gmail.com",
},
{
"Name": "유영수",
"Email": "yys@naver.com",
}
])
address_df = pd.DataFrame([
{
"Address": "패캠 대기빌딩 김기표님 앞",
},
{
"Address": "패캠 대기빌딩 유영수님 앞",
}
])
name_email_df
address_df
name_email_df + address_df
pd.concat([name_email_df, address_df])
pd.concat([name_email_df, address_df], axis=1) #기본값으로 0, False가 들어가 있다.
pd.concat([name_email_df, address_df], axis=True)
name_email_df = pd.DataFrame([
{
"Name": "김기표",
"Email": "kkp@naver.co.kr",
},
{
"Name": "김기준",
"Email": "sj@gmail.co.kr",
}
])
address_df = pd.DataFrame([
{
"Address": "패캠 대기빌딩 김기표님 앞",
},
{
"Address": "패캠 대기빌딩 김기준님 앞",
},
{
"Address": "패캠 대기빌딩 김기상님 앞",
}
])
pd.concat([name_email_df, address_df], axis=1)
ab_df = a_df.append(b_df).reset_index(drop=True)
ab_df
ab_df.iterrows()
for index, row in ab_df.iterrows(): #가능하면 for문 말고 다른 방법을 찾아보자.
print(row) #row를 기준으로 iteration을 돈다.
# iterrows => 수작업으로 고생을 해서 구현을 하다가 => pandas내부기능으로 옮기는 작업을 할 예정이다.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A와 B 회사가 합병해서 하나의 DF으로 만들자.
Step2: append 2. concat 이 있어.
Step3: pd.concat
Step4: 카카오(A)와 다음(B) 합병 했는데 누가 어디 출신인지 알고 싶다.
Step5: df[STR] => 그 칼럼의 값들에 대한 시리즈
Step6: pd.concat으로 인덱스 기준으로 상위 하위 개념 나눌 수 있다.
Step7: 위에서 Row로 합치는 것을 했다.
Step8: row를 기준으로 (즉 x축을 기준으로) concat을 한 것이다.
Step9: pandas에서 for문을 돌리는 방법이 따로 있다.
|
14,220
|
<ASSISTANT_TASK:>
Python Code:
from simulator import Simulator, Map, Agent
import numpy as np
class config:
    """Simulator configuration: render modes, world/screen geometry, physics step."""
    # Static settings read by Simulator; screen is drawn at twice the world
    # size here — presumably pixels vs. world units, TODO confirm in Simulator.
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 10,
        'world_width': 300,
        'world_height': 300,
        'screen_width': 600,
        'screen_height': 600,
        'dt': 1.0 / 10, # update interval in the system
        'eps': 1.0 # minimal distance for collision detection
    }
    rebounce = 0.5 # rebounce factor. 0 for no rebounce. 1 for rebounce with same speed at the opposite direction.
env = Simulator(config=config)
map = Map()
map.get_map_from_geom2d(env, kp=np.array([[-100,0], [100,0]]))
n_targets = 10
n_robots = 10
targets = [Agent(env, kp=np.array([[-3, 0], [3, 0]]), color=(1, 0, 0, 0.5), v_max=1.5) for i in range(n_targets)]
robots = [Agent(env, kp=np.array([[-3, 0], [3, 0]]), color=(1, 0, 1, 0.5), v_max=2) for i in range(n_robots)]
vs = (np.random.rand(n_targets+n_robots,2) - 0.5) * 4
while True:
env._render()
for i in range(n_targets + n_robots):
if np.random.rand() < 0.02:
vs[i] = (np.random.rand(2) - 0.5) * 4
else:
vs[i] = vs[i]
for i, t in enumerate(targets+robots):
t.update(v=np.array(vs[i]))
from simulator import Simulator, Map, Agent
from devices import Device
import numpy as np
from time import time
from utils import dist
# an example of simulator configuration
class config:
    """Example simulator configuration for the compact OOP demo below."""
    # Same schema as the first demo's config, with a 300x250 world rendered
    # in a 600x500 window.
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 10,
        'world_width': 300,
        'world_height': 250,
        'screen_width': 600, # size of window
        'screen_height': 500, # size of window
        'dt': 1.0 / 10, # update interval in the system
        'eps': 1.0 # minimal distance for collision detection
    }
    rebounce = 0.5 # rebounce factor. 0 for no rebounce. 1 for rebounce with same speed at the opposite direction.
# example of overloading agent class
class Robot(Agent):
    """Demo agent: a small square robot that wanders with a noisy sinusoidal velocity."""

    def __init__(self, env):
        """Build a 4x4 square agent with an attached range sensor.

        Every Agent subclass must implement update() to set velocity or
        acceleration so the agent actually moves.
        """
        square = np.array([[-2, -2], [2, -2], [2, 2], [-2, 2]])
        Agent.__init__(self, env,
                       kp=square,
                       color=(1, 0, 0, 0.5),
                       v_max=2.0)
        self.sensor = Sensor(env, self)

    def update(self, t):
        """Read the sensor, then set a time-varying velocity plus random jitter."""
        self.sensor.read()
        vx = self.v_max * (np.cos(t) + np.random.rand())
        vy = self.v_max * (np.sin(t) + np.random.rand())
        self.v = np.array([vx, vy])
        self.va = np.pi
# example of a range sensor that changes color when detect other agents or a obstacle.
class Sensor(Device):
    """Range sensor drawn as a line segment; turns red when something is detected."""

    def __init__(self, env, parent, radius=10):
        """Attach a sensor of the given radius to ``parent`` inside ``env``."""
        self.radius = radius
        endpoints = np.array([[-radius, 0], [radius, 0]])
        Device.__init__(self, env, parent, kp=endpoints, color=(0, 1, 0, 0.5), filled=True)

    def read(self):
        """Return True if another agent or the map is within ``radius``.

        Side effects: prints the distance to every other agent and recolors
        the sensor geometry (red on detection, green otherwise).
        """
        for other in self.env.agents:
            if other is not self.parent:
                print(dist(self.parent, other))
            if (other is not self.parent) and dist(self.parent, other) < self.radius:
                self.geom.set_color_rgba(np.array([1, 0, 0, 0.5]))
                return True
        if dist(self.parent, self.env.map) < self.radius:
            self.geom.set_color_rgba(np.array([1, 0, 0, 0.5]))
            return True
        # no collision
        self.geom.set_color_rgba(np.array([0, 1, 0, 0.5]))
        return False
env = Simulator(config=config)
my_map = Map()
my_map.get_map_from_geom2d(env, kp=np.array([[-100, 0], [100, 0]]))
robots = [Robot(env) for i in range(2)]
while True:
t = time()
[r.update(t) for r in robots]
env._render()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In bugbot simulation, either map or agents are subclass of Geom2d which is a wrapper over Geom defined in OpenAI Gym. You can define a Geom2d using keypoints(kp) . if the length of keypoints is less than 3, geom type is automatically parsed as a 'circle', otherwise the default is 'polygon'. For OOP user, you will want to customize your agent as the subclass of the base class. An more compact example is also given here
|
14,221
|
<ASSISTANT_TASK:>
Python Code:
from helpers import load_data
# load dataset
x, y = load_data()
def build_k_indices(y, k_fold, seed):
    """Build shuffled index groups for k-fold cross-validation.

    (The original docstring had lost its quotes during text cleaning,
    leaving a syntax error; restored here.)

    Parameters
    ----------
    y : numpy.ndarray
        Target vector; only its length is used.
    k_fold : int
        Number of folds.
    seed : int
        Seed for the NumPy legacy RNG, for reproducibility.

    Returns
    -------
    numpy.ndarray, shape (k_fold, len(y) // k_fold)
        Row ``k`` holds the sample indices of fold ``k``. Any remainder
        samples beyond ``k_fold * interval`` are dropped.
    """
    num_row = y.shape[0]
    interval = int(num_row / k_fold)
    np.random.seed(seed)
    indices = np.random.permutation(num_row)
    k_indices = [indices[k * interval: (k + 1) * interval]
                 for k in range(k_fold)]
    return np.array(k_indices)
from costs import compute_mse
from ridge_regression import ridge_regression
from build_polynomial import build_poly
def cross_validation(y, x, k_indices, k, lambda_, degree):
    """Return (train_loss, test_loss) of ridge regression for fold ``k``.

    (The original docstring had lost its quotes during text cleaning,
    leaving a syntax error; restored here.)

    Fold ``k`` of ``k_indices`` is held out as the test set; the remaining
    samples train a degree-``degree`` polynomial ridge model with penalty
    ``lambda_``. Losses are MSE from ``compute_mse``.
    """
    assert 0 <= k and k < len(k_indices)
    # Split: fold k is test, everything else is train.
    x_test = x[k_indices[k]]
    y_test = y[k_indices[k]]
    x_train = np.delete(x, k_indices[k])
    y_train = np.delete(y, k_indices[k])
    # Polynomial feature expansion.
    x_test = build_poly(x_test, degree)
    x_train = build_poly(x_train, degree)
    # Fit ridge regression on the training fold.
    w = ridge_regression(y_train, x_train, lambda_)
    # Evaluate on both folds.
    loss_tr = compute_mse(y_train, x_train, w)
    loss_te = compute_mse(y_test, x_test, w)
    return loss_tr, loss_te
from plots import cross_validation_visualization
def cross_validation_demo():
    """Run 4-fold CV of degree-7 ridge regression over a log-spaced lambda
    grid on the module-level ``x``/``y``, plot the mean train/test losses,
    and return the per-fold losses for each lambda."""
    seed = 1
    degree = 7
    k_fold = 4
    lambdas = np.logspace(-4, 0, 30)
    # Split data into k folds.
    k_indices = build_k_indices(y, k_fold, seed)
    # Mean loss per lambda, plus the raw per-fold losses.
    rmse_tr, rmse_te = [], []
    tr_data, te_data = [], []
    for lambda_ in lambdas:
        fold_tr, fold_te = [], []
        for k in range(len(k_indices)):
            l_tr, l_te = cross_validation(y, x, k_indices, k, lambda_, degree)
            fold_tr.append(l_tr)
            fold_te.append(l_te)
        rmse_tr.append(np.mean(fold_tr))
        rmse_te.append(np.mean(fold_te))
        tr_data.append(fold_tr)
        te_data.append(fold_te)
    cross_validation_visualization(lambdas, rmse_tr, rmse_te)
    return (tr_data, te_data)
tr_data, te_data = cross_validation_demo()
def draw_plot(data, edge_color, fill_color, sym):
    """Draw a boxplot of ``data`` on the current axes.

    ``edge_color`` colors every box element, ``fill_color`` fills the boxes,
    and ``sym`` is the matplotlib flier (outlier) marker symbol.
    """
    box = plt.boxplot(data, patch_artist=True, sym=sym)
    for part in ('boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps'):
        plt.setp(box[part], color=edge_color)
    for artist in box['boxes']:
        artist.set(facecolor=fill_color)
draw_plot(te_data, 'red', 'darkred', '+')
draw_plot(tr_data, 'blue', 'darkblue', '*')
plt.ylabel("MSE")
plt.xlabel("Lambda")
plt.title("Cross Validation Mean & Variance")
from least_squares import least_squares
from split_data import split_data
from plots import bias_variance_decomposition_visualization
def bias_variance_demo():
    """Bias-variance decomposition demo with least squares.

    (The original docstring had lost its quotes during text cleaning,
    leaving a syntax error; restored here.)

    For 100 random seeds, fits least-squares polynomial models of degree
    1..9 on a tiny (0.5%) training split of noisy sin(x) data and
    visualizes the spread of train/test RMSE across seeds.
    """
    # Experiment parameters.
    seeds = range(100)
    num_data = 10000
    ratio_train = 0.005
    degrees = range(1, 10)
    # RMSE per (seed, degree).
    rmse_tr = np.empty((len(seeds), len(degrees)))
    rmse_te = np.empty((len(seeds), len(degrees)))
    for index_seed, seed in enumerate(seeds):
        np.random.seed(seed)
        x = np.linspace(0.1, 2 * np.pi, num_data)
        y = np.sin(x) + 0.3 * np.random.randn(num_data).T
        x_train, y_train, x_test, y_test = split_data(x, y, ratio_train, seed)
        for index_degree, degree in enumerate(degrees):
            tx_train = build_poly(x_train, degree)
            tx_test = build_poly(x_test, degree)
            tr_loss, w = least_squares(y_train, tx_train)
            rmse_tr[index_seed][index_degree] = np.sqrt(2 * compute_mse(y_train, tx_train, w))
            rmse_te[index_seed][index_degree] = np.sqrt(2 * compute_mse(y_test, tx_test, w))
    bias_variance_decomposition_visualization(degrees, rmse_tr, rmse_te)
bias_variance_demo()
def bias_variance_ridge_demo():
    """Bias-variance decomposition demo with ridge regression.

    (The original docstring had lost its quotes during text cleaning,
    leaving a syntax error; restored here. An unused local ``k_fold`` was
    also removed.)

    For 100 random seeds, fits ridge-regression polynomial models of
    degree 1..9 on a tiny (0.5%) training split of noisy sin(x) data,
    selecting a lambda per degree, and visualizes train/test RMSE.

    NOTE(review): lambda is chosen by minimizing the *training* loss,
    which always favors the smallest penalty; a validation split would be
    the sounder criterion — confirm intended.
    """
    # Experiment parameters.
    seeds = range(100)
    num_data = 10000
    ratio_train = 0.005
    degrees = range(1, 10)
    lambdas = np.logspace(-4, 4, 30)
    # RMSE per (seed, degree).
    rmse_tr = np.empty((len(seeds), len(degrees)))
    rmse_te = np.empty((len(seeds), len(degrees)))
    for index_seed, seed in enumerate(seeds):
        np.random.seed(seed)
        x = np.linspace(0.1, 2 * np.pi, num_data)
        y = np.sin(x) + 0.3 * np.random.randn(num_data).T
        x_train, y_train, x_test, y_test = split_data(x, y, ratio_train, seed)
        for index_degree, degree in enumerate(degrees):
            tx_train = build_poly(x_train, degree)
            tx_test = build_poly(x_test, degree)
            # Pick the lambda with the lowest training loss (see NOTE above).
            losses = []
            for lambda_ in lambdas:
                w = ridge_regression(y_train, tx_train, lambda_)
                losses.append(compute_mse(y_train, tx_train, w))
            best_lambda = lambdas[np.argmin(losses)]
            w = ridge_regression(y_train, tx_train, best_lambda)
            rmse_tr[index_seed][index_degree] = np.sqrt(2 * compute_mse(y_train, tx_train, w))
            rmse_te[index_seed][index_degree] = np.sqrt(2 * compute_mse(y_test, tx_test, w))
    bias_variance_decomposition_visualization(degrees, rmse_tr, rmse_te)
bias_variance_ridge_demo()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Cross-Validation and Bias-Variance decomposition
Step5: Bias-Variance Decomposition
|
14,222
|
<ASSISTANT_TASK:>
Python Code:
% matplotlib inline
import os, sys, time
import math, random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from joblib import Parallel, delayed
%run 'ssvm.ipynb'
check_protocol = True
traj_group_test = dict()
test_ratio = 0.3
for key in sorted(TRAJ_GROUP_DICT.keys()):
group = sorted(TRAJ_GROUP_DICT[key])
num = int(test_ratio * len(group))
if num > 0:
np.random.shuffle(group)
traj_group_test[key] = set(group[:num])
if check_protocol == True:
nnrand_dict = dict()
ssvm_dict = dict()
# train set
trajid_set_train = set(trajid_set_all)
for key in traj_group_test.keys():
trajid_set_train = trajid_set_train - traj_group_test[key]
# train ssvm
poi_info = calc_poi_info(list(trajid_set_train), traj_all, poi_all)
# build POI_ID <--> POI__INDEX mapping for POIs used to train CRF
# which means only POIs in traj such that len(traj) >= 2 are included
poi_set = set()
for x in trajid_set_train:
if len(traj_dict[x]) >= 2:
poi_set = poi_set | set(traj_dict[x])
poi_ix = sorted(poi_set)
poi_id_dict, poi_id_rdict = dict(), dict()
for idx, poi in enumerate(poi_ix):
poi_id_dict[poi] = idx
poi_id_rdict[idx] = poi
# generate training data
train_traj_list = [traj_dict[x] for x in trajid_set_train if len(traj_dict[x]) >= 2]
node_features_list = Parallel(n_jobs=N_JOBS)\
(delayed(calc_node_features)\
(tr[0], len(tr), poi_ix, poi_info, poi_clusters=POI_CLUSTERS, \
cats=POI_CAT_LIST, clusters=POI_CLUSTER_LIST) for tr in train_traj_list)
edge_features = calc_edge_features(list(trajid_set_train), poi_ix, traj_dict, poi_info)
assert(len(train_traj_list) == len(node_features_list))
X_train = [(node_features_list[x], edge_features.copy(), \
(poi_id_dict[train_traj_list[x][0]], len(train_traj_list[x]))) for x in range(len(train_traj_list))]
y_train = [np.array([poi_id_dict[x] for x in tr]) for tr in train_traj_list]
assert(len(X_train) == len(y_train))
# train
sm = MyModel()
verbose = 0 #5
ssvm = OneSlackSSVM(model=sm, C=SSVM_C, n_jobs=N_JOBS, verbose=verbose)
ssvm.fit(X_train, y_train, initialize=True)
print('SSVM training finished, start predicting.'); sys.stdout.flush()
# predict for each query
for query in sorted(traj_group_test.keys()):
ps, L = query
# start should be in training set
if ps not in poi_set: continue
assert(L <= poi_info.shape[0])
# prediction of ssvm
node_features = calc_node_features(ps, L, poi_ix, poi_info, poi_clusters=POI_CLUSTERS, \
cats=POI_CAT_LIST, clusters=POI_CLUSTER_LIST)
# normalise test features
unaries, pw = scale_features_linear(node_features, edge_features, node_max=sm.node_max, node_min=sm.node_min, \
edge_max=sm.edge_max, edge_min=sm.edge_min)
X_test = [(unaries, pw, (poi_id_dict[ps], L))]
# test
y_pred = ssvm.predict(X_test)
rec = [poi_id_rdict[x] for x in y_pred[0]] # map POIs back
rec1 = [ps] + rec[1:]
ssvm_dict[query] = rec1
# prediction of nearest neighbour
candidates_id = sorted(TRAJ_GROUP_DICT[query] - traj_group_test[query])
assert(len(candidates_id) > 0)
np.random.shuffle(candidates_id)
nnrand_dict[query] = traj_dict[candidates_id[0]]
if check_protocol == True:
F1_ssvm = []; pF1_ssvm = []; Tau_ssvm = []
F1_nn = []; pF1_nn = []; Tau_nn = []
for key in sorted(ssvm_dict.keys()):
assert(key in nnrand_dict)
F1, pF1, tau = evaluate(ssvm_dict[key], traj_group_test[key])
F1_ssvm.append(F1); pF1_ssvm.append(pF1); Tau_ssvm.append(tau)
F1, pF1, tau = evaluate(nnrand_dict[key], traj_group_test[key])
F1_nn.append(F1); pF1_nn.append(pF1); Tau_nn.append(tau)
print('SSVM: F1 (%.3f, %.3f), pairsF1 (%.3f, %.3f) Tau (%.3f, %.3f)' % \
(np.mean(F1_ssvm), np.std(F1_ssvm)/np.sqrt(len(F1_ssvm)), \
np.mean(pF1_ssvm), np.std(pF1_ssvm)/np.sqrt(len(pF1_ssvm)),
np.mean(Tau_ssvm), np.std(Tau_ssvm)/np.sqrt(len(Tau_ssvm))))
print('NNRAND: F1 (%.3f, %.3f), pairsF1 (%.3f, %.3f), Tau (%.3f, %.3f)' % \
(np.mean(F1_nn), np.std(F1_nn)/np.sqrt(len(F1_nn)), \
np.mean(pF1_nn), np.std(pF1_nn)/np.sqrt(len(pF1_nn)), \
np.mean(Tau_nn), np.std(Tau_nn)/np.sqrt(len(Tau_nn))))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run notebook ssvm.ipynb
Step2: Sanity check for evaluation protocol
|
14,223
|
<ASSISTANT_TASK:>
Python Code:
import kfp
import kfp.gcp as gcp
import kfp.dsl as dsl
import kfp.compiler as compiler
import kfp.components as comp
import datetime
import kubernetes as k8s
# Required Parameters
PROJECT_ID='<ADD GCP PROJECT HERE>'
GCS_BUCKET='gs://<ADD STORAGE LOCATION HERE>'
# Optional Parameters, but required for running outside Kubeflow cluster
# The host for 'AI Platform Pipelines' ends with 'pipelines.googleusercontent.com'
# The host for pipeline endpoint of 'full Kubeflow deployment' ends with '/pipeline'
# Examples are:
# https://7c021d0340d296aa-dot-us-central2.pipelines.googleusercontent.com
# https://kubeflow.endpoints.kubeflow-pipeline.cloud.goog/pipeline
HOST = '<ADD HOST NAME TO TALK TO KUBEFLOW PIPELINE HERE>'
# For 'full Kubeflow deployment' on GCP, the endpoint is usually protected through IAP, therefore the following
# will be needed to access the endpoint.
CLIENT_ID = '<ADD OAuth CLIENT ID USED BY IAP HERE>'
OTHER_CLIENT_ID = '<ADD OAuth CLIENT ID USED TO OBTAIN AUTH CODES HERE>'
OTHER_CLIENT_SECRET = '<ADD OAuth CLIENT SECRET USED TO OBTAIN AUTH CODES HERE>'
# This is to ensure the proper access token is present to reach the end point for 'AI Platform Pipelines'
# If you are not working with 'AI Platform Pipelines', this step is not necessary
! gcloud auth print-access-token
# Create kfp client
in_cluster = True
try:
k8s.config.load_incluster_config()
except:
in_cluster = False
pass
if in_cluster:
client = kfp.Client()
else:
if HOST.endswith('googleusercontent.com'):
CLIENT_ID = None
OTHER_CLIENT_ID = None
OTHER_CLIENT_SECRET = None
client = kfp.Client(host=HOST,
client_id=CLIENT_ID,
other_client_id=OTHER_CLIENT_ID,
other_client_secret=OTHER_CLIENT_SECRET)
%%bash
# Create folders if they don't exist.
mkdir -p tmp/reuse_components_pipeline/mnist_training
# Create the Python file that lists GCS blobs.
cat > ./tmp/reuse_components_pipeline/mnist_training/app.py <<HERE
import argparse
from datetime import datetime
import tensorflow as tf
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path', type=str, required=True, help='Name of the model file.')
parser.add_argument(
'--bucket', type=str, required=True, help='GCS bucket name.')
args = parser.parse_args()
bucket=args.bucket
model_path=args.model_path
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
print(model.summary())
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
callbacks = [
tf.keras.callbacks.TensorBoard(log_dir=bucket + '/logs/' + datetime.now().date().__str__()),
# Interrupt training if val_loss stops improving for over 2 epochs
tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),
]
model.fit(x_train, y_train, batch_size=32, epochs=5, callbacks=callbacks,
validation_data=(x_test, y_test))
from tensorflow import gfile
gcs_path = bucket + "/" + model_path
# The export require the folder is new
if gfile.Exists(gcs_path):
gfile.DeleteRecursively(gcs_path)
tf.keras.experimental.export_saved_model(model, gcs_path)
with open('/output.txt', 'w') as f:
f.write(gcs_path)
HERE
%%bash
# Create Dockerfile.
# AI platform only support tensorflow 1.14
cat > ./tmp/reuse_components_pipeline/mnist_training/Dockerfile <<EOF
FROM tensorflow/tensorflow:1.14.0-py3
WORKDIR /app
COPY . /app
EOF
IMAGE_NAME="mnist_training_kf_pipeline"
TAG="latest" # "v_$(date +%Y%m%d_%H%M%S)"
GCR_IMAGE="gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}".format(
PROJECT_ID=PROJECT_ID,
IMAGE_NAME=IMAGE_NAME,
TAG=TAG
)
APP_FOLDER='./tmp/reuse_components_pipeline/mnist_training/'
# In the following, for the purpose of demonstration
# Cloud Build is choosen for 'AI Platform Pipelines'
# kaniko is choosen for 'full Kubeflow deployment'
if HOST.endswith('googleusercontent.com'):
# kaniko is not pre-installed with 'AI Platform Pipelines'
import subprocess
# ! gcloud builds submit --tag ${IMAGE_NAME} ${APP_FOLDER}
cmd = ['gcloud', 'builds', 'submit', '--tag', GCR_IMAGE, APP_FOLDER]
build_log = (subprocess.run(cmd, stdout=subprocess.PIPE).stdout[:-1].decode('utf-8'))
print(build_log)
else:
if kfp.__version__ <= '0.1.36':
# kfp with version 0.1.36+ introduce broken change that will make the following code not working
import subprocess
builder = kfp.containers._container_builder.ContainerBuilder(
gcs_staging=GCS_BUCKET + "/kfp_container_build_staging"
)
kfp.containers.build_image_from_working_dir(
image_name=GCR_IMAGE,
working_dir=APP_FOLDER,
builder=builder
)
else:
raise("Please build the docker image use either [Docker] or [Cloud Build]")
image_name = GCR_IMAGE
%%bash -s "{image_name}"
GCR_IMAGE="${1}"
echo ${GCR_IMAGE}
# Create Yaml
# the image uri should be changed according to the above docker image push output
cat > mnist_pipeline_component.yaml <<HERE
name: Mnist training
description: Train a mnist model and save to GCS
inputs:
- name: model_path
description: 'Path of the tf model.'
type: String
- name: bucket
description: 'GCS bucket name.'
type: String
outputs:
- name: gcs_model_path
description: 'Trained model path.'
type: GCSPath
implementation:
container:
image: ${GCR_IMAGE}
command: [
python, /app/app.py,
--model_path, {inputValue: model_path},
--bucket, {inputValue: bucket},
]
fileOutputs:
gcs_model_path: /output.txt
HERE
import os
mnist_train_op = kfp.components.load_component_from_file(os.path.join('./', 'mnist_pipeline_component.yaml'))
mnist_train_op.component_spec
mlengine_deploy_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.7.0-rc.3/components/gcp/ml_engine/deploy/component.yaml')
def deploy(
        project_id,
        model_uri,
        model_id,
        runtime_version,
        python_version):
    """Thin wrapper around the AI Platform deploy component.

    Forwards the identifying arguments to ``mlengine_deploy_op`` and pins
    ``replace_existing_version=True`` / ``set_default=True`` so each pipeline
    run promotes the freshly trained model version.
    """
    return mlengine_deploy_op(
        project_id=project_id,
        model_uri=model_uri,
        model_id=model_id,
        runtime_version=runtime_version,
        python_version=python_version,
        replace_existing_version=True,
        set_default=True)
def deployment_test(project_id: str, model_name: str, version: str) -> str:
    """Smoke-test a freshly deployed AI Platform model version.

    Sends the first two MNIST test images to the online-prediction endpoint
    and returns the predictions as a JSON string.

    Args:
        project_id: GCP project hosting the deployed model.
        model_name: Model resource name; only the last path segment is used.
        version: Version resource name; only the last path segment is used.

    Returns:
        JSON-encoded list of prediction results.
    """
    # The pipeline passes full resource names ("projects/.../models/<name>");
    # keep only the trailing identifier expected by the predict API.
    model_name = model_name.split("/")[-1]
    version = version.split("/")[-1]

    # Imports live inside the function so it can be containerized with
    # kfp.components.func_to_container_op.
    import googleapiclient.discovery

    def predict(project, model, data, version=None):
        """Run online predictions on a list of instances.

        Args:
            project: (str) project where the Cloud ML Engine model is deployed.
            model: (str) model name.
            data: ([[any]]) list of input instances, where each input instance
                is a list of attributes.
            version: (str) version of the model to target.

        Returns:
            Mapping[str, any]: dictionary of prediction results defined by
            the model.
        """
        service = googleapiclient.discovery.build('ml', 'v1')
        name = 'projects/{}/models/{}'.format(project, model)
        if version is not None:
            name += '/versions/{}'.format(version)
        response = service.projects().predict(
            name=name, body={
                'instances': data
            }).execute()
        if 'error' in response:
            raise RuntimeError(response['error'])
        return response['predictions']

    import tensorflow as tf
    import json

    # Use the standard MNIST test split, normalized to [0, 1] as in training.
    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    result = predict(
        project=project_id,
        model=model_name,
        data=x_test[0:2].tolist(),
        version=version)
    print(result)
    return json.dumps(result)
# # Test the function with already deployed version
# deployment_test(
# project_id=PROJECT_ID,
# model_name="mnist",
# version='ver_bb1ebd2a06ab7f321ad3db6b3b3d83e6' # previous deployed version for testing
# )
deployment_test_op = comp.func_to_container_op(
func=deployment_test,
base_image="tensorflow/tensorflow:1.15.0-py3",
packages_to_install=["google-api-python-client==1.7.8"])
# Define the pipeline
@dsl.pipeline(
    name='Mnist pipeline',
    description='A toy pipeline that performs mnist model training.'
)
def mnist_reuse_component_deploy_pipeline(
        project_id: str = PROJECT_ID,
        model_path: str = 'mnist_model',
        bucket: str = GCS_BUCKET
):
    """Train an MNIST model, deploy it to AI Platform, then smoke-test it.

    Task ordering is implied by the output->input references between the
    three steps below; each task runs with the 'user-gcp-sa' secret so it
    can reach GCS and AI Platform.
    """
    # Train and export the model to GCS (reusable component loaded above).
    train_task = mnist_train_op(
        model_path=model_path,
        bucket=bucket
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))
    # Deploy the exported SavedModel as a new AI Platform model version.
    deploy_task = deploy(
        project_id=project_id,
        model_uri=train_task.outputs['gcs_model_path'],
        model_id="mnist",
        runtime_version="1.14",
        python_version="3.5"
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))
    # Hit the online-prediction endpoint of the version just deployed.
    deploy_test_task = deployment_test_op(
        project_id=project_id,
        model_name=deploy_task.outputs["model_name"],
        version=deploy_task.outputs["version_name"],
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))
    return True
pipeline_func = mnist_reuse_component_deploy_pipeline
experiment_name = 'minist_kubeflow'
arguments = {"model_path":"mnist_model",
"bucket":GCS_BUCKET}
run_name = pipeline_func.__name__ + ' run'
# Submit pipeline directly from pipeline function
run_result = client.create_run_from_pipeline_func(pipeline_func,
experiment_name=experiment_name,
run_name=run_name,
arguments=arguments)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create client
Step2: Build reusable components
Step3: Create a Docker container
Step4: Build docker image
Step5: If you want to use docker to build the image
Step6: Writing your component definition file
Step7: Define deployment operation on AI Platform
Step9: Kubeflow serving deployment component as an option. Note that the deployed Endpoint URI is not available as an output of this component.
Step10: Create your workflow as a Python function
Step11: Submit a pipeline run
|
14,224
|
<ASSISTANT_TASK:>
Python Code:
# Planet class definition at the end of Part 1
# code to make sure constructors and get methods all work
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Test your code to make sure that the class definition worked.
|
14,225
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
def random_line(m, b, sigma, size=10):
    """Create a line y = m*x + b + N(0, sigma**2) between x=[-1.0, 1.0].

    Parameters
    ----------
    m : float
        The slope of the line.
    b : float
        The y-intercept of the line.
    sigma : float
        The standard deviation of the y direction normal distribution noise.
    size : int
        The number of points to create for the line.

    Returns
    -------
    x : array of floats
        The array of x values for the line with `size` points.
    y : array of floats
        The array of y values for the lines with `size` points.
    """
    x = np.linspace(-1.0, 1.0, size)
    if sigma == 0:
        # No noise: return the exact line.
        y = m * x + b
    else:
        # np.random.normal takes the *standard deviation* as its scale, so
        # pass sigma directly (the original passed sigma**2, giving noise
        # with std sigma**2 instead of the documented N(0, sigma**2)).
        y = m * x + b + np.random.normal(0.0, sigma, size)
    return x, y
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
def ticks_out(ax):
    """Move the ticks of the given matplotlib Axes to the outside of the box."""
    ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
    ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
    """Plot a random line with slope m, intercept b and size points.

    Parameters
    ----------
    m, b, sigma, size :
        Forwarded to random_line() to generate the points.
    color : str
        Matplotlib color for the scatter points.
    """
    x, y = random_line(m, b, sigma, size)
    plt.scatter(x, y, color=color)
    # Fixed limits so interact() sliders don't rescale the axes.
    plt.xlim(-1.1, 1.1)
    plt.ylim(-10.0, 10.0)
    plt.box(False)
    plt.xlabel('x')
    plt.ylabel('y(x)')
    plt.title('Random Line')
    # Outward ticks with none on the top/right spines.
    plt.tick_params(axis='y', right='off', direction='out')
    plt.tick_params(axis='x', top='off', direction='out')
    plt.grid(True)
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
# YOUR CODE HERE
#raise NotImplementedError()
interact(plot_random_line, m=(-10.0,10.0), b=(-5.0,5.0),sigma=(0.0,5.0,0.01),size=(10,100,10), color={'red':'r','blue':'b','green':'g'})
#### assert True # use this cell to grade the plot_random_line interact
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Line with Gaussian noise
Step5: Write a function named plot_random_line that takes the same arguments as random_line and creates a random line using random_line and then plots the x and y points using Matplotlib's scatter function
Step6: Use interact to explore the plot_random_line function using
|
14,226
|
<ASSISTANT_TASK:>
Python Code:
import vcsn
%%automaton a1
context = "lal_char(abc), z"
$ -> 0
0 -> 1 <2>a
0 -> 2 <3>a
1 -> 1 a
1 -> 3 <4>a
2 -> 2 a
2 -> 4 a
3 -> $
4 -> $
a1.minimize()
%%automaton a
context = "lal_char, z"
$ -> 0
$ -> 1 <2>
0 -> 0 a
0 -> 1 b
0 -> 2 <3>a,b
0 -> 3 b
1 -> 1 a, b
1 -> 2 a, <2>b
1 -> 3 <2>a
2 -> $ <2>
3 -> $ <2>
a.minimize()
%%automaton a2
context = "lal_char(abcde), b"
$ -> 0
0 -> 1 a
0 -> 3 b
1 -> 1 a
1 -> 2 b
2 -> 2 a
2 -> 5 b
3 -> 3 a
3 -> 4 b
4 -> 4 a
4 -> 5 b
5 -> 5 a, b
5 -> $
a2.minimize("signature")
a2.is_deterministic()
a2.minimize("moore")
a2.minimize("brzozowski")
a2.minimize("hopcroft")
a = vcsn.b.expression('ab+ab').standard()
a.transpose().type()
a.transpose().minimize().type()
a.minimize().transpose().type()
z = vcsn.context('lal_char, z')
a1 = z.expression('<2>abc').standard()
a2 = z.expression('ab<2>c').standard()
a = a1 | a2
a
a.minimize()
m = a.minimize().cominimize()
m
m.type()
m2 = a.cominimize().cominimize().minimize().minimize()
m2
m == m2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Weighted
Step2: The following example is taken from lombardy.2005.tcs, Fig. 4.
Step3: Signature
Step4: Moore
Step5: Brzozowski
Step6: Hopcroft
Step7: Minimization of transposed automaton
Step8: Repeated Minimization/Cominimization
Step9: Note that the initial and final states are labeled 0,4 and 3,7 , not {0}, {4} and {3,7} as would have been the case if the two levels of decorations had been kept. Indeed, the type of m is simple
Step10: We obtain the exact same result (including decorations) even with repeated invocations, even in a different order
|
14,227
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import os
import os.path
import re
import astropy.table
import astropy.units as u
from astropy.io import fits
from astropy import wcs
def read_raytracing(num_sections=14, num_groups=12):
    """Parse $DESIMODEL/data/inputs/throughput/raytracing.txt.

    The file is organized as `num_sections` fixed-size sections of 86 lines;
    each section holds `num_groups` 7-line groups of ray-tracing results at
    increasing field angles.

    Returns
    -------
    tuple
        (wavelength, band_size, field_angle, spot_centroid, rms_spot_radius,
        rms_spot_size) as astropy Quantities (nm, nm, deg, mm, um, um).
    """
    # Initialize the result arrays.
    wavelength = np.zeros(num_sections)
    band_size = np.zeros(num_sections)
    field_angle = np.empty(num_groups)
    spot_centroid = np.empty((num_sections, num_groups, 2))
    rms_spot_radius = np.empty((num_sections, num_groups))
    rms_spot_size = np.empty((num_sections, num_groups, 2))
    # Read the file into memory.
    filename = os.path.join(os.environ['DESIMODEL'],
                            'data', 'inputs', 'throughput', 'raytracing.txt')
    with open(filename, 'r') as f:
        lines = f.readlines()
    print('Read {0} lines from {1}.'.format(len(lines), filename))
    for section in range(num_sections):
        # Each section occupies exactly 86 lines of the file.
        i = 86 * section
        # First two section headers are "Whole band", "360-375nm".
        # Remaining headers have the form: 360-400 band
        section_header = lines[i].rstrip()
        if section > 0:
            # Fixed-width parse of the "lo-hi" wavelength band header.
            wlen_lo, wlen_hi = float(section_header[:3]), float(section_header[4:7])
            wavelength[section] = 0.5 * (wlen_lo + wlen_hi)
            band_size[section] = wlen_hi - wlen_lo
        for group in range(num_groups):
            # Each group is 7 lines; the first 2 lines of a section are headers.
            j = i + 7 * group + 2
            # Extract the field angle [deg] from a line of the form:
            # Field coordinate : 0.00000000E+000 4.50000000E-001
            fa = float(lines[j].rstrip().split()[-1])
            if section == 0:
                field_angle[group] = fa
            else:
                # Field angles must agree across all wavelength sections.
                assert field_angle[group] == fa
            # Extract the spot centroid [mm] from a line of the form:
            # Image coordinate : -1.01677290E-014 1.09734671E+002
            spot_centroid[section, group, :] = [float(x) for x in lines[j + 1].split()[-2:]]
            # Extract the RMS spot radius from a line of the form:
            # RMS Spot Radius : 1.91079098E+001 µm
            rms_spot_radius[section, group] = float(lines[j+2].split()[-2])
            # Extract the RMS spot X, Y sizes from lines of the form:
            # RMS Spot X Size : 1.06094918E+001 µm
            rms_spot_size[section, group, 0] = float(lines[j+3].split()[-2])
            rms_spot_size[section, group, 1] = float(lines[j+4].split()[-2])
    # Apply units.
    wavelength = wavelength * u.nm
    band_size = band_size * u.nm
    field_angle = field_angle * u.deg
    spot_centroid = spot_centroid * u.mm
    rms_spot_radius = rms_spot_radius * u.um
    rms_spot_size = rms_spot_size * u.um
    return wavelength, band_size, field_angle, spot_centroid, rms_spot_radius, rms_spot_size
wavelength, band_size, field_angle, \
spot_centroid, rms_spot_radius, rms_spot_size = read_raytracing()
def plot_raytracing(wavelength, band_size, field_angle, spot_centroid, rms_spot_radius,
                    rms_spot_size, save=None):
    """Reproduce the geometric_blur plot of DESI-0347-v13.

    Top panel: RMS spot radius / sqrt(2) per wavelength band vs. field angle.
    Bottom panel: lateral color shift relative to the whole-band centroid.
    Inputs are the Quantities returned by read_raytracing(); index 0 along
    the wavelength axis holds the "Whole band" result.
    """
    nwlen, npos = rms_spot_radius.shape[:2]
    angle = field_angle.to(u.deg).value
    # Initialize plot.
    fig, (top, btm) = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
    # Start at 1 to skip the whole-band entry, plotted separately below.
    for i in range(1, nwlen):
        lo = (wavelength[i] - 0.5 * band_size[i]).to(u.nm).value
        hi = (wavelength[i] + 0.5 * band_size[i]).to(u.nm).value
        label = '{:.0f}-{:.0f}nm'.format(lo, hi)
        top.plot(angle, rms_spot_radius[i].to(u.um).value / np.sqrt(2), label=label)
        btm.plot(angle, (spot_centroid[i, :, 1] - spot_centroid[0, :, 1])
                 .to(u.um).value, label=label)
    top.plot(angle, rms_spot_radius[0].to(u.um).value / np.sqrt(2),
             'bo-', label='Whole band')
    top.legend(bbox_to_anchor=(0.5, 1), loc='upper center',
               ncol=nwlen // 3, fontsize='small')
    top.set_ylabel('RMS spot radius / sqrt(2) [um]')
    top.set_ylim(6., 20.)
    top.grid()
    btm.set_xlabel('Field Angle [deg]')
    btm.set_ylabel('Lateral color shift [um]')
    btm.set_ylim(-20, +20)
    btm.set_xlim(0., 1.6)
    btm.grid()
    plt.tight_layout()
    if save:
        plt.savefig(save)
plot_raytracing(wavelength, band_size, field_angle, spot_centroid,
rms_spot_radius, rms_spot_size, save='raytracing.png')
def get_achromatic_blur(version='v13'):
    """Return the quadrature sum of the achromatic blur contributions.

    Values (in microns) are transcribed from the throughput sheet of
    DESI-0347: cells B14:B26 for 'v13' and cells B43:B72 for 'v10'.
    """
    contributions_um = {
        'v13': [4.728, 2.431, 1.049, 3.990, 0.000, 0.000, 0.001,
                0.422, 0.205, 0.400, 1.561, 3.544],
        'v10': [7.629, 2.431, 1.049, 0.667, 0.000, 0.000, 0.001,
                0.422, 0.205, 0.400, 1.561, 4.035],
    }
    if version not in contributions_um:
        raise ValueError('Invalid version: {0}.'.format(version))
    achromatic_blur = np.array(contributions_um[version]) * u.um
    return np.sqrt((achromatic_blur ** 2).sum())
print(f"v10: {get_achromatic_blur('v10'):.3f} v13: {get_achromatic_blur('v13'):.3f}")
def get_achromatic_rms_offset():
    """Return the quadrature sum of the achromatic centroid-offset terms.

    Values (in microns) are transcribed from cells B45:B74, omitting B52,
    of the Throughput sheet.
    """
    contributions_um = [
        0.000, 0.000, 4.000, 0.000, 2.000, 2.000, 3.000, 1.000, 2.000,
        2.000, 1.500, 2.000, 1.500, 1.420, 0.110, 0.090, 0.000, 0.400, 0.090,
        0.062, 1.173, 0.002, 0.006, 0.195, 0.030, 0.012, 0.391, 0.469]
    offsets = np.array(contributions_um) * u.um
    return np.sqrt((offsets ** 2).sum())
print(f'sigma1d = {get_achromatic_rms_offset() / np.sqrt(2):.3f}')
def extrapolate(x_in, y_in, x_lo, x_hi):
    """Linearly extrapolate tabulated (x, y) values to x_lo and x_hi.

    One extrapolated point is prepended/appended using the slope of the
    first/last interval.  Returns (x_out, y_out) with the units of the
    inputs attached.
    """
    assert x_in.shape == y_in.shape
    # Fall back to dimensionless (unit 1) when an input is a plain ndarray.
    try:
        x_unit = x_in.unit
    except AttributeError:
        x_unit = 1
    try:
        y_unit = y_in.unit
    except AttributeError:
        y_unit = 1
    dx = x_in[1:] - x_in[:-1]
    dy = y_in[1:] - y_in[:-1]
    # First / last finite differences give the extrapolation slopes.
    y_lo = y_in[0] + dy[0] / dx[0] * (x_lo - x_in[0])
    y_hi = y_in[-1] + dy[-1] / dx[-1] * (x_hi - x_in[-1])
    # NOTE(review): the .to(...) calls below assume the inputs are astropy
    # Quantities even when x_unit/y_unit fell back to 1 — confirm plain
    # ndarray inputs are never actually passed in.
    x_out = np.hstack(
        (x_lo.to(x_unit).value,
         x_in.to(x_unit).value,
         x_hi.to(x_unit).value)) * x_unit
    y_out = np.hstack(
        (y_lo.to(y_unit).value,
         y_in.to(y_unit).value,
         y_hi.to(y_unit).value)) * y_unit
    return x_out, y_out
def save_blur(wavelength, field_angle, raytracing_blur,
              wlen_lo=355*u.nm, wlen_hi=985*u.nm,
              filename='DESI-0347_blur.ecsv'):
    """Save an ECSV table of RMS blur versus wavelength and field angle.

    Chromatic blur values are extrapolated to [wlen_lo, wlen_hi] and combined
    in quadrature with the achromatic blur from get_achromatic_blur().

    Parameters
    ----------
    wavelength : Quantity array
        Band-center wavelengths of the chromatic blur values.
    field_angle : Quantity array
        Field angles; one output column is written per angle.
    raytracing_blur : Quantity array
        Chromatic blur with shape (len(wavelength), len(field_angle)).
    wlen_lo, wlen_hi : Quantity
        Wavelength endpoints to extrapolate to.
    filename : str
        Name of the ECSV file to write (overwritten if present).
    """
    description = """\
RMS spot size as a function of wavelength and field angle derived from \
DESI-0347-v10. Chromatic contributions are based on a text file prepared \
by Mike Sholl that is also plotted in the geometric_blur tab of the \
throughput spreadsheet in DESI-0347-v13. Achromatic contributions are taken \
from cells B14:B26 added in quadrature."""
    table = astropy.table.Table(meta=dict(description=description))
    # Add wavelength points at each end.
    wlen_ext, _ = extrapolate(wavelength, raytracing_blur[:, 0], wlen_lo, wlen_hi)
    # Convert wavelength values from nm to Angstrom.
    table['wavelength'] = astropy.table.Column(
        wlen_ext, unit='Angstrom', format='{:.1f}',
        description='Wavelength')
    for i, r in enumerate(field_angle):
        column_name = 'r={0:.2f}'.format(r)
        # Extrapolate values at each end.
        _, blur_ext = extrapolate(wavelength, raytracing_blur[:, i], wlen_lo, wlen_hi)
        # Add achromatic blur in quadrature.
        blur = np.sqrt(blur_ext ** 2 + get_achromatic_blur() ** 2)
        table[column_name] = astropy.table.Column(
            blur, unit='micron', format='{:.4f}',
            description='RMS spot size at field angle {0}.'.format(column_name))
    table.write(filename, format='ascii.ecsv', overwrite=True)
save_blur(wavelength[1:], field_angle, rms_spot_radius[1:] / np.sqrt(2))
def save_offset(wavelength, field_angle, raytracing_offset,
                wlen_lo=355*u.nm, wlen_hi=985*u.nm,
                filename='DESI-0347_offset.ecsv'):
    """Save an ECSV table of radial centroid offsets vs. wavelength and angle.

    Parameters mirror save_blur(); unlike the blur table, no achromatic
    contribution is added here (values are pure ray tracing of ideal optics).
    """
    description = """\
Radial centroid offset as a function of wavelength and field angle derived \
from DESI-0347-v10. Values are based on a text file prepared \
by Mike Sholl that is also plotted in the geometric_blur tab of the \
throughput spreadsheet in DESI-0347-v13. Values are derived from ray \
tracing of the ideal optics and do not include the random achromatic \
errors in cells B43:B72 of the throughput sheet."""
    table = astropy.table.Table(meta=dict(description=description))
    # Add wavelength points at each end.
    wlen_ext, _ = extrapolate(wavelength, raytracing_offset[:, 0], wlen_lo, wlen_hi)
    table['wavelength'] = astropy.table.Column(
        wlen_ext, unit='Angstrom', format='{:.1f}',
        description='Center of wavelength band')
    for i, r in enumerate(field_angle):
        column_name = 'r={0:.2f}'.format(r)
        # Extrapolate values at each end.
        _, offset_ext = extrapolate(wavelength, raytracing_offset[:, i], wlen_lo, wlen_hi)
        table[column_name] = astropy.table.Column(
            offset_ext, unit='micron', format='{:.4f}',
            description='Radial centroid offset at field angle {0}.'.format(column_name))
    table.write(filename, format='ascii.ecsv', overwrite=True)
save_offset(wavelength[1:], field_angle, spot_centroid[1:, :, 1] - spot_centroid[0, :, 1])
import desimodel.focalplane.sim
def save_static_offsets(seed=1, fov=3.2 * u.deg):
    """Generate random static centroid offsets and save them to a FITS file.

    Writes DESI-0347_static_offset_<seed>.fits containing dx, dy offset
    images (in microns) on a square grid covering the field of view `fov`.
    """
    # Generate random dx, dy offsets on a square grid.
    dx, dy = desimodel.focalplane.sim.generate_random_centroid_offsets(seed=seed)
    npix = len(dx)
    # Degrees per pixel for the linear WCS written below.
    scale = fov.to(u.deg).value / npix
    # Save offsets to a FITS file. HDU0 just has a header.
    header = fits.Header()
    header['COMMENT'] = 'Random focal plane centroid offsets in microns.'
    header['SEED'] = seed
    hdu0 = fits.PrimaryHDU(header=header)
    # HDU1, 2 contain dx, dy offsets in microns as 2D image data.
    w = wcs.WCS(naxis=2)
    w.wcs.ctype = ['x', 'y']
    # Reference pixel at the grid center, world origin at the plate center.
    w.wcs.crpix = [npix / 2. + 0.5, npix / 2. + 0.5]
    w.wcs.cdelt = [scale, scale]
    w.wcs.crval = [0., 0.]
    header = w.to_header()
    header['BUNIT'] = 'um'
    header['COMMENT'] = '+x component of focal plane offset.'
    hdu1 = fits.ImageHDU(data=dx.to(u.um).value.astype(np.float32),
                         name='XOFFSET', header=header)
    header['COMMENT'] = '+y component of focal plane offset.'
    hdu2 = fits.ImageHDU(data=dy.to(u.um).value.astype(np.float32),
                         name='YOFFSET', header=header)
    hdus = fits.HDUList([hdu0, hdu1, hdu2])
    hdus.writeto('DESI-0347_static_offset_{0}.fits'.format(seed), overwrite=True)
save_static_offsets(seed=1)
save_static_offsets(seed=2)
save_static_offsets(seed=3)
def parse_quantity(string, dimensions=None):
    """Parse a string like "1.50 um" into an astropy Quantity.

    Parameters
    ----------
    string : str
        A number optionally followed by a unit string.
    dimensions : str or astropy.units.Unit, optional
        When given, convert the parsed quantity to these dimensions.

    Returns
    -------
    astropy.units.Quantity
        The parsed (and possibly converted) quantity.

    Raises
    ------
    ValueError
        If the string does not start with a number, or the quantity is not
        convertible to `dimensions`.
    """
    # Raw string avoids invalid-escape warnings for the \s sequences.
    _float_pattern = re.compile(
        r'\s*([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)\s*')
    # Look for a valid number starting the string.
    found_number = _float_pattern.match(string)
    if not found_number:
        raise ValueError('Unable to parse quantity.')
    value = float(found_number.group(1))
    # Whatever follows the number is interpreted as the unit string.
    unit = string[found_number.end():]
    quantity = astropy.units.Quantity(value, unit)
    if dimensions is not None:
        try:
            if not isinstance(dimensions, astropy.units.Unit):
                dimensions = astropy.units.Unit(dimensions)
            quantity = quantity.to(dimensions)
        except (ValueError, astropy.units.UnitConversionError):
            raise ValueError('Quantity "{0}" is not convertible to {1}.'
                             .format(string, dimensions))
    return quantity
def load_table_2d(filename, x_column_name, y_column_prefix):
    """Load a 2D table from an ECSV file written by save_blur / save_offset.

    Columns whose names start with `y_column_prefix` encode their y value in
    the rest of the name (e.g. "r=0.25 deg"); their data hold the tabulated
    values at each x.

    Returns
    -------
    tuple
        (x, y, data) where data has shape (len(x), len(y)) and carries the
        common data-column unit.
    """
    table = astropy.table.Table.read(filename, format='ascii.ecsv')
    nx = len(table)
    x = table[x_column_name].copy()
    # Look for columns whose name has the specified prefix.
    y_value, y_index = [], []
    y_unit, data_unit = 1, 1
    for i, colname in enumerate(table.colnames):
        if colname.startswith(y_column_prefix):
            # Parse the column name as a value.
            y = parse_quantity(colname[len(y_column_prefix):])
            # All encoded y values must share a single unit.
            if y_unit == 1:
                y_unit = y.unit
            elif y_unit != y.unit:
                raise RuntimeError('Column unit mismatch: {0} != {1}.'
                                   .format(y_unit, y.unit))
            # All data columns must share a single unit too.
            if data_unit == 1:
                data_unit = table[colname].unit
            elif data_unit != table[colname].unit:
                raise RuntimeError('Data unit mismatch: {0} != {1}.'
                                   .format(data_unit, table[colname].unit))
            y_value.append(y.value)
            y_index.append(i)
    # Prepare the array of y values.
    ny = len(y_value)
    y = np.array(y_value) * y_unit
    # Extract values for each x,y pair.
    data = np.empty((nx, ny))
    for j, i in enumerate(y_index):
        data[:, j] = table.columns[i][:]
    return x, y, data * data_unit
def plot_static_offsets(seed=1, subsampling=4, save=None):
    """Plot a subsampled quiver map of saved static centroid offsets.

    Reads DESI-0347_static_offset_<seed>.fits from the current directory and
    shows every `subsampling`-th offset vector, colored by magnitude.
    """
    hdus = fits.open('DESI-0347_static_offset_{0}.fits'.format(seed))
    # Extract the offset images.
    dx = hdus['XOFFSET'].data
    dy = hdus['YOFFSET'].data
    dr = np.sqrt(dx ** 2 + dy ** 2)
    # Reconstruct the linear WCS.
    hdr = hdus[1].header
    w = wcs.WCS(hdr)
    nxy = len(dx)
    pix = np.arange(nxy)
    # World coordinates of the first and last pixels (grid is square).
    xlo, ylo, xhi, yhi = w.wcs_pix2world([[0, 0], [nxy - 1, nxy - 1]], 0).flatten()
    assert xlo == ylo and xhi == yhi
    xy = np.linspace(xlo, xhi, nxy)
    xy_pad = hdr['CDELT1']
    unit = u.Unit(hdr['BUNIT'])
    # Downsample.
    dx = dx[::subsampling, ::subsampling]
    dy = dy[::subsampling, ::subsampling]
    dr = dr[::subsampling, ::subsampling]
    xy = xy[::subsampling]
    plt.figure(figsize=(12, 10))
    plt.quiver(xy, xy, dx, dy, dr, headwidth=1.5, headlength=2.)
    # Pad the limits by one pixel so edge arrows are not clipped.
    plt.xlim(xy[0] - xy_pad, xy[-1] + xy_pad)
    plt.ylim(xy[0] - xy_pad, xy[-1] + xy_pad)
    plt.xlabel('x-offset from plate center [deg]')
    plt.ylabel('y-offset from plate center [deg]')
    # Square aspect so the vector field is not visually distorted.
    plt.gca().set_aspect(1)
    plt.colorbar(pad=0.01, shrink=0.95).set_label(
        'Centroid offset [{0}]'.format(unit))
    plt.tight_layout()
    if save:
        plt.savefig(save)
plot_static_offsets(2, save='static_offset_2.png')
def plot_validation(wgt_exp=1.0, save=None):
    """Compare field-averaged blur/offset with the DESI-347-v13 tabulation.

    Field angles are weighted by angle**wgt_exp to mimic the larger number
    of targets at larger field angles.  Reads the blur/offset ECSV files
    from the current directory.
    """
    # Load the new blur and offset data.
    wlen, fangle, blur = load_table_2d('DESI-0347_blur.ecsv', 'wavelength', 'r=')
    wlen, fangle, offset = load_table_2d('DESI-0347_offset.ecsv', 'wavelength', 'r=')
    # Get the DESI-0347 achromatic blurs and centroid offsets.
    blur_v13 = get_achromatic_blur(version='v13')
    blur_v10 = get_achromatic_blur(version='v10')
    offset_rms = np.sqrt(get_achromatic_rms_offset() ** 2 + desimodel.focalplane.sim.default_offset ** 2)
    # Calculate field-angle-weighted averages, to account for the larger number of targets
    # at larger field angles.
    wgt = fangle.value ** wgt_exp
    wgt_sum = np.sum(wgt)
    weighted_mean = lambda x: np.sum(wgt * x, axis=1) / wgt_sum
    weighted_rms = lambda x: np.sqrt(
        np.sum(wgt * x ** 2, axis=-1) * wgt_sum - np.sum(wgt * x, axis=-1) ** 2) / wgt_sum
    avg_blur_v13 = weighted_mean(blur)
    # Swap the v13 achromatic term for the v10 one in quadrature.
    avg_blur_v10 = weighted_mean(np.sqrt(blur ** 2 - blur_v13 ** 2 + blur_v10 ** 2))
    avg_offset = np.sqrt(weighted_rms(offset) ** 2 + offset_rms ** 2)
    # Plot the new field-angle-weighted throughput data.
    plt.plot(wlen.to(u.Angstrom).value,
             avg_blur_v13.to(u.um).value, 'b.-', lw=2, alpha=0.25, label='DESIMODEL blur v13')
    plt.plot(wlen.to(u.Angstrom).value,
             avg_blur_v10.to(u.um).value, 'b.--', lw=2, alpha=0.25, label='DESIMODEL blur v10')
    plt.plot(wlen.to(u.Angstrom).value,
             avg_offset.to(u.um).value, 'r.-', lw=2, alpha=0.25, label='DESIMODEL offset')
    # Compare with the field-averaged blur and RMS offsets from DESI-347-v12 C27:P27,
    wlen347 = np.array([360, 375, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 980])
    blur347 = np.array([14.78, 14.51, 13.91, 13.44, 12.86, 12.51,
                        12.62, 13.03, 13.53, 14.00, 14.37, 14.62, 14.73, 14.83])
    rmsoff347 = np.array([13.475, 13.017, 12.412, 11.715, 10.979, 11.050, 11.701,
                          12.384, 12.874, 13.085, 12.989, 12.759, 12.843, 12.928])
    # Reference wavelengths are tabulated in nm; convert to Angstrom.
    plt.scatter(10 * wlen347, blur347, c='b', label='DESI347-v13')
    plt.scatter(10 * wlen347, rmsoff347, c='r', label='DESI347-v13')
    plt.legend(bbox_to_anchor=(0.5, 1.0), loc='upper center',
               ncol=2, fontsize='small')
    plt.xlabel('Wavelength [Angstrom]')
    plt.ylabel('Blur, RMS offset [micron]')
    plt.xlim(3400, 10000)
    plt.grid()
    plt.tight_layout()
    if save:
        plt.savefig(save)
plot_validation(save='validation.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preprocess Ray Tracing Results
Step2: Reproduce the plot on the geometric_blur tab of the DESI-0347-v13 spreadsheet. For reference, the fiber diameter is 107.0um.
Step3: Throughput Spreadsheet Data
Step4: Compare v10, v13 quadrature sums
Step5: Calculate the achromatic RMS centroid offset to generate, as the quadrature sum of cells B45
Step6: Calculate the equivalent 1D RMS to use for the specsim config parameter instrument.offset.sigma1d
Step7: Note that version of specsim <= v0.11.1 used sigma1d = 5.1 since they neglected the contributions from "Lateral shifts due to barrel misalignments relative to M1" in cells B65
Step9: Spot Radius
Step11: Centroid Offset
Step12: Static Focal-Plane Offsets
Step13: Validation
Step14: Define a function that can read either of the ECSV files created above and decode the field angles from the column names.
Step15: Read a static centroid offsets file and plot a subset of them as a vector field. The offsets file is read from the current directory, not $DESIMODEL/data/throughput/.
Step16: Compare the blur and RMS offset values from the DESI-347-v13 Throughput tab, which are tabulated as a function of wavelength only, with equivalent field-averaged quantities calculated from the files generated above, which are tabulated in both field-angle and wavelength. For reference, the fiber diameter is 107.0um. The blur and offset ECSV files are read from the current directory, not $DESIMODEL/data/throughput/.
|
14,228
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import quantecon as qe
# matplotlib settings
plt.rcParams['axes.xmargin'] = 0
plt.rcParams['axes.ymargin'] = 0
def approx_lq(s_star, x_star, f_star, Df_star, DDf_star, g_star, Dg_star, discount):
    """Return an LQ instance approximating the problem around (s_star, x_star).

    Gradient of f: Df_star = np.array([f_s, f_x])
    Hessian of f: DDf_star = np.array([[f_ss, f_sx], [f_sx, f_xx]])
    Gradient of g: Dg_star = np.array([g_s, g_x])
    """
    n = 2
    k = 1
    sx_star = np.array([s_star, x_star])
    # Objective (to minimize): (1, s)' R (1, s) + 2 x N (1, s) + x Q x
    Q = np.empty((k, k))
    R = np.empty((n, n))
    N = np.empty((k, n))
    # Second-order Taylor expansion of -f around (s_star, x_star).
    R[0, 0] = -(f_star - Df_star @ sx_star + (sx_star @ DDf_star @ sx_star) / 2)
    # DDf_star.ravel() = (f_ss, f_sx, f_sx, f_xx); the repeated N[0, 1]
    # target is harmless because the Hessian is symmetric.
    R[1, 1], N[0, 1], N[0, 1], Q[0, 0] = -DDf_star.ravel() / 2
    R[1, 0], N[0, 0] = -(Df_star - DDf_star @ sx_star).ravel() / 2
    R[0, 1] = R[1, 0]
    # State transition: (1, s') = A (1, s) + B x + C w (deterministic: C = 0).
    A = np.empty((n, n))
    B = np.empty((n, k))
    C = np.zeros((n, 1))
    A[0, 0], A[0, 1], B[0, 0] = 1, 0, 0
    A[1, 0] = g_star - Dg_star @ sx_star
    A[1, 1], B[1, 0] = Dg_star.ravel()
    lq = qe.LQ(Q, R, A, B, C, N, beta=discount)
    return lq
alpha = 0.2
beta = 0.5
gamma = 0.9
discount = 0.9
f = lambda s, x: (s - x)**(1 - alpha) / (1 - alpha)
f_s = lambda s, x: (s - x)**(-alpha)
f_x = lambda s, x: -f_s(s, x)
f_ss = lambda s, x: -alpha * (s - x)**(-alpha - 1)
f_sx = lambda s, x: -f_ss(s, x)
f_xx = lambda s, x: f_ss(s, x)
g = lambda s, x: gamma * x + x**beta
g_s = lambda s, x: 0
g_x = lambda s, x: gamma + beta * x**(beta - 1)
x_star = ((discount * beta) / (1 - discount * gamma))**(1 / (1 - beta))
s_star = gamma * x_star + x_star**beta
s_star, x_star
f_x(s_star, x_star) + discount * f_s(g(s_star, x_star), x_star) * g_x(s_star, x_star)
f_star = f(s_star, x_star)
Df_star = np.array([f_s(s_star, x_star), f_x(s_star, x_star)])
DDf_star = np.array([[f_ss(s_star, x_star), f_sx(s_star, x_star)],
[f_sx(s_star, x_star), f_xx(s_star, x_star)]])
g_star = g(s_star, x_star)
Dg_star = np.array([g_s(s_star, x_star), g_x(s_star, x_star)])
lq = approx_lq(s_star, x_star, f_star, Df_star, DDf_star, g_star, Dg_star, discount)
P, F, d = lq.stationary_values()
P, F, d
V = lambda s: np.array([1, s]) @ P @ np.array([1, s]) + d
V(s_star)
-f_star / (1 - lq.beta)
X = lambda s: -(F @ np.array([1, s]))[0]
X(s_star)
x_star
X = np.vectorize(X)
s_min, s_max = 5, 10
ss = np.linspace(s_min, s_max, 50)
title = "Optimal Investment Policy"
xlabel = "Wealth"
ylabel = "Investment (% of Wealth)"
fig, ax = plt.subplots(figsize=(8,5))
ax.plot(ss, X(ss)/ss, label='L-Q')
ax.plot(s_star, x_star/s_star, '*', color='k', markersize=10)
ax.set_xlim(s_min, s_max)
ax.set_ylim(0.65, 0.9)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.tick_params(right='on')
ax.legend()
plt.show()
alpha = 4.0
beta = 1.0
gamma = 0.5
kappa = 0.2
discount = 0.9
f = lambda s, x: (s - x)**(1 - gamma) / (1 - gamma) - kappa * (s - x)
f_s = lambda s, x: (s - x)**(-gamma) - kappa
f_x = lambda s, x: -f_s(s, x)
f_ss = lambda s, x: -gamma * (s - x)**(-gamma - 1)
f_sx = lambda s, x: -f_ss(s, x)
f_xx = lambda s, x: f_ss(s, x)
g = lambda s, x: alpha * x - 0.5 * beta * x**2
g_s = lambda s, x: 0
g_x = lambda s, x: alpha - beta * x
x_star = (discount * alpha - 1) / (discount * beta)
s_star = (alpha**2 - 1/discount**2) / (2 * beta)
s_star, x_star
f_x(s_star, x_star) + discount * f_s(g(s_star, x_star), x_star) * g_x(s_star, x_star)
f_star = f(s_star, x_star)
Df_star = np.array([f_s(s_star, x_star), f_x(s_star, x_star)])
DDf_star = np.array([[f_ss(s_star, x_star), f_sx(s_star, x_star)],
[f_sx(s_star, x_star), f_xx(s_star, x_star)]])
g_star = g(s_star, x_star)
Dg_star = np.array([g_s(s_star, x_star), g_x(s_star, x_star)])
lq = approx_lq(s_star, x_star, f_star, Df_star, DDf_star, g_star, Dg_star, discount)
P, F, d = lq.stationary_values()
P, F, d
V = lambda s: np.array([1, s]) @ P @ np.array([1, s]) + d
V(s_star)
-f_star / (1 - lq.beta)
X = lambda s: -(F @ np.array([1, s]))[0]
X(s_star)
x_star
X = np.vectorize(X)
s_min, s_max = 6, 9
ss = np.linspace(s_min, s_max, 50)
harvest = ss - X(ss)
h_star = s_star - x_star
title = "Optimal Harvest Policy"
xlabel = "Available Stock"
ylabel = "Harvest (% of Stock)"
fig, ax = plt.subplots(figsize=(8,5))
ax.plot(ss, harvest/ss, label='L-Q')
ax.plot(s_star, h_star/s_star, '*', color='k', markersize=10)
ax.set_xlim(s_min, s_max)
ax.set_ylim(0.5, 0.75)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.tick_params(right='on')
ax.legend()
plt.show()
shadow_price = lambda s: -2 * (P @ [1, s])[1]
shadow_price = np.vectorize(shadow_price)
title = "Shadow Price Function"
ylabel = "Price"
fig, ax = plt.subplots(figsize=(8,5))
ax.plot(ss, shadow_price(ss), label='L-Q')
ax.plot(s_star, shadow_price(s_star), '*', color='k', markersize=10)
ax.set_xlim(s_min, s_max)
ax.set_ylim(0.2, 0.4)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.tick_params(right='on')
ax.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: We consider a dynamic maximization problem with
Step3: Optimal Economic Growth
Step4: Function definitions
Step5: Steady state
Step6: (s_star, x_star) satisfies the Euler equations
Step7: Construct $f^$, $\nabla f^$, $D^2 f^$, $g^$, and $\nabla g^*$
Step8: LQ Approximation
Step9: Solution by LQ.stationary_values
Step10: The optimal value function (of the LQ minimization problem)
Step11: The value at $s^*$
Step12: The optimal policy function
Step13: The optimal choice at $s^*$
Step14: Renewable Resource Management
|
14,229
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
?plt.scatter()
from matplotlib import markers
markers.MarkerStyle.markers.keys()
x = np.random.rand(100)
y = np.random.rand(100)
plt.scatter(x, y, label = 'The Dots', c = u'r', marker = u'o')
plt.grid(True)
plt.box(False)
plt.xlabel('The X-Axis')
plt.ylabel('The Y-Axis')
plt.legend(loc=0) ##I have no idea if you wanted a legend... but I tried to find the best place for it
data = np.random.rand(100)
data
?plt.hist()
plt.hist(data, bins = 30, histtype = u'step', color = 'g')
plt.box(True)
plt.xlabel('The X-Axis for Histograms')
plt.ylabel('The Y-Axis for Histograms')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Scatter plots
Step2: Histogram
|
14,230
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.integrate import odeint
from IPython.html.widgets import interact, fixed
g = 9.81 # m/s^2
l = 0.5 # length of pendulum, in meters
tmax = 50. # seconds
t = np.linspace(0, tmax, int(100*tmax))
def derivs(y, t, a, b, omega0, g=9.81, l=0.5):
    """Compute the derivatives of the damped, driven pendulum.

    Parameters
    ----------
    y : ndarray
        The solution vector at the current time t[i]: [theta[i], omega[i]].
    t : float
        The current time t[i].
    a, b, omega0 : float
        Damping coefficient, driving amplitude and driving frequency.
    g : float, optional
        Gravitational acceleration in m/s^2 (default matches the
        module-level constant ``g``).
    l : float, optional
        Pendulum length in meters (default matches the module-level
        constant ``l``).

    Returns
    -------
    dy : ndarray
        The vector of derivatives at t[i]: [dtheta[i], domega[i]].
    """
    theta = y[0]
    omega = y[1]
    # theta' = omega;  omega' = -(g/l) sin(theta) - a*omega - b*sin(omega0*t)
    # NOTE(review): the original wrote "-g/1" -- an apparent l/1 typo,
    # since the pendulum equation divides by the length l. Restored here;
    # confirm against the intended model.
    domega = -g / l * np.sin(theta) - a * omega - b * np.sin(omega0 * t)
    dtheta = omega
    return np.array((dtheta, domega))
assert np.allclose(derivs(np.array([np.pi,1.0]), 0, 1.0, 1.0, 1.0), [1.,-1.])
def energy(y, g=9.81, l=0.5):
    """Compute the energy per unit mass for the state array y.

    The state array y can have two forms:

    1. ndim=1: np.array([theta, omega]) at a single time.
    2. ndim=2: each row is the [theta, omega] state at a single time.

    Parameters
    ----------
    y : ndarray
        A single state vector or an array of states (one per row).
    g : float, optional
        Gravitational acceleration (default matches the module constant).
    l : float, optional
        Pendulum length (default matches the module constant).

    Returns
    -------
    E/m : float (ndim=1) or ndarray (ndim=2)
        The energy per mass: g*l*(1 - cos(theta)) + 0.5*(l**2)*omega**2.

    Raises
    ------
    ValueError
        If y is neither 1- nor 2-dimensional.
    """
    if y.ndim == 1:
        th = y[0]
        omega = y[1]
    elif y.ndim == 2:
        th = y[:, 0]
        omega = y[:, 1]
    else:
        # the original silently fell through to an UnboundLocalError here
        raise ValueError("y must be 1- or 2-dimensional")
    # potential energy (zero at the bottom of the swing) plus kinetic energy
    return g * l * (1 - np.cos(th)) + 0.5 * (l ** 2) * omega ** 2
assert np.allclose(energy(np.array([np.pi,0])),g)
assert np.allclose(energy(np.ones((10,2))), np.ones(10)*energy(np.array([1,1])))
# YOUR CODE HERE
# raise NotImplementedError()
thi = np.pi
omegai = 0.
ic = np.array([thi, omegai])
y = odeint(derivs, ic, t, args=(0.0,0.0,0.0), atol=1e-6, rtol=1e-5)
# YOUR CODE HERE
# raise NotImplementedError()
plt.plot(t, energy(y))
plt.xlabel('$t$')
plt.ylabel('$E/m$')
plt.title('Energy/mass vs. time');
# YOUR CODE HERE
# raise NotImplementedError()
plt.plot(t, y[:,0], label='$\theta(t)$')
plt.plot(t, y[:,1], label='$\omega(t)$')
plt.xlabel('$t$')
plt.ylabel('Solution')
plt.title('variables vs. time');
assert True # leave this to grade the two plots and their tuning of atol, rtol.
def plot_pendulum(a=0.0, b=0.0, omega0=0.0):
    """Integrate the damped, driven pendulum and make a phase plot.

    Parameters
    ----------
    a : float
        Damping coefficient.
    b : float
        Driving amplitude.
    omega0 : float
        Driving frequency.
    """
    # initial condition: just off the inverted position, at rest
    ic = np.array([-np.pi + 0.1, 0.0])
    # tight tolerances keep the long integration accurate
    y = odeint(derivs, ic, t, args=(a, b, omega0), atol=1e-10, rtol=1e-9)
    plt.plot(y[:, 0], y[:, 1])
    plt.xlim(-2.0 * np.pi, 2.0 * np.pi)
    plt.ylim(-10, 10)
    # raw strings: the original '$\theta(t)$' embedded a literal TAB
    # character because '\t' is an escape sequence in a plain string
    plt.xlabel(r'$\theta(t)$')
    plt.ylabel(r'$\omega(t)$')
plot_pendulum(0.5, 0.0, 0.0)
# YOUR CODE HERE
# raise NotImplementedError()
w = interact(plot_pendulum, a=(0.0,1.0,0.1), b=(0.0,10.0,0.1), omega0=(0.0,10.0,0.1));
w
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Damped, driven nonlinear pendulum
Step4: Write a function derivs for usage with scipy.integrate.odeint that computes the derivatives for the damped, driven harmonic oscillator. The solution vector at each time will be $\vec{y}(t) = (\theta(t),\omega(t))$.
Step5: Simple pendulum
Step7: Damped pendulum
Step8: Here is an example of the output of your plot_pendulum function that should show a decaying spiral.
Step9: Use interact to explore the plot_pendulum function with
|
14,231
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries
import numpy as np
import pandas as pd
# Read student data
student_data = pd.read_csv("student-data.csv")
print "Student data read successfully!"
# Note: The last column 'passed' is the target/label, all other are feature columns
# TODO: Compute desired values - replace each '?' with an appropriate expression/function call
shape = student_data.shape
n_students = shape[0]
n_features = shape[1]-1 # the last column is the target
n_passed = len(student_data[student_data.passed == 'yes'])
n_failed = len(student_data[student_data.passed == 'no'])
grad_rate = 100*float(n_passed)/n_students
print "Total number of students: {}".format(n_students)
print "Number of students who passed: {}".format(n_passed)
print "Number of students who failed: {}".format(n_failed)
print "Number of features: {}".format(n_features)
print "Graduation rate of the class: {:.2f}%".format(grad_rate)
# Extract feature (X) and target (y) columns
feature_cols = list(student_data.columns[:-1]) # all columns but last are features
target_col = student_data.columns[-1] # last column is the target/label
print "Feature column(s):-\n{}".format(feature_cols)
print "Target column: {}".format(target_col)
X_all = student_data[feature_cols] # feature values for all students
y_all = student_data[target_col] # corresponding targets/labels
print "\nFeature values:-"
print X_all.head() # print the first 5 rows
# Preprocess feature columns
def preprocess_features(X):
    """Return a copy of X with every non-numeric column converted to numeric.

    yes/no columns are mapped to 1/0; any remaining object (categorical)
    columns are expanded into one-hot dummy columns named '<col>_<value>'.
    """
    outX = pd.DataFrame(index=X.index)  # output dataframe, initially empty
    # Check each column
    for col, col_data in X.iteritems():
        # If data type is non-numeric, try to replace all yes/no values with 1/0
        if col_data.dtype == object:
            col_data = col_data.replace(['yes', 'no'], [1, 0])
        # Note: This should change the data type for yes/no columns to int
        # If still non-numeric, convert to one or more dummy variables
        if col_data.dtype == object:
            col_data = pd.get_dummies(col_data, prefix=col)  # e.g. 'school' => 'school_GP', 'school_MS'
        outX = outX.join(col_data)  # collect column(s) in output dataframe
    return outX
X_all = preprocess_features(X_all)
print "Processed feature columns ({}):-\n{}".format(len(X_all.columns), list(X_all.columns))
# First, decide how many training vs test samples you want
num_all = student_data.shape[0] # same as len(student_data)
num_train = 300 # about 75% of the data
num_test = num_all - num_train
# TODO: Then, select features (X) and corresponding labels (y) for the training and test sets
# Note: Shuffle the data or randomly select samples to avoid any bias due to ordering in the dataset
indices = range(num_all)
import random
random.shuffle(indices)
train_indices = indices[:num_train]
test_indices = indices[-num_test:]
X_train = X_all.iloc[train_indices]
y_train = y_all[train_indices]
X_test = X_all.iloc[test_indices]
y_test = y_all[test_indices]
print "Training set: {} samples".format(X_train.shape[0])
print "Test set: {} samples".format(X_test.shape[0])
# Note: If you need a validation set, extract it from within training data
# Train a model
import time
def train_classifier(clf, X_train, y_train):
    """Fit clf on the training data, printing the wall-clock training time."""
    print "Training {}...".format(clf.__class__.__name__)
    start = time.time()
    clf = clf.fit(X_train, y_train)  # fit mutates clf in place; rebinding the local is harmless
    end = time.time()
    print "Done!\nTraining time (secs): {:.3f}".format(end - start)
# TODO: Choose a model, import it and instantiate an object
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier(max_depth=3)
# Fit model to training data
train_classifier(clf, X_train, y_train) # note: using entire training set here
print clf # you can inspect the learned model by printing it
# Predict on training set and compute F1 score
from sklearn.metrics import f1_score
def predict_labels(clf, features, target):
    """Predict labels with clf and return the F1 score against target.

    Prints the wall-clock prediction time. 'yes' is the positive class.
    """
    print "Predicting labels using {}...".format(clf.__class__.__name__)
    start = time.time()
    y_pred = clf.predict(features)
    end = time.time()
    print "Done!\nPrediction time (secs): {:.3f}".format(end - start)
    return f1_score(target.values, y_pred, pos_label='yes')
train_f1_score = predict_labels(clf, X_train, y_train)
print "F1 score for training set: {}".format(train_f1_score)
# Predict on test data
print "F1 score for test set: {}".format(predict_labels(clf, X_test, y_test))
# Train and predict using different training set sizes
def train_predict(clf, X_train, y_train, X_test, y_test):
    """Train clf on the given training set and print train/test F1 scores."""
    print "------------------------------------------"
    print "Training set size: {}".format(len(X_train))
    train_classifier(clf, X_train, y_train)
    print "F1 score for training set: {}".format(predict_labels(clf, X_train, y_train))
    print "F1 score for test set: {}".format(predict_labels(clf, X_test, y_test))
num_all = student_data.shape[0] # same as len(student_data)
num_test = 95
test_indices = indices[-num_test:]
X_test = X_all.iloc[test_indices]
y_test = y_all[test_indices]
indices = range(num_all)
import random
random.shuffle(indices)
def try_different_training_sizes(clf):
    """Run train_predict for training-set sizes 100, 200 and 300.

    Uses the module-level shuffled `indices` and full `X_all`/`y_all`;
    the test set (`X_test`/`y_test`) is held constant across sizes.
    """
    # TODO: Run the helper function above for desired subsets of training data
    # Note: Keep the test set constant
    for size in (100, 200, 300):
        train_indices = indices[:size]
        X_train = X_all.iloc[train_indices]
        y_train = y_all[train_indices]
        train_predict(clf, X_train, y_train, X_test, y_test)
# TODO: Train and predict using two other models
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.cross_validation import cross_val_score
from sklearn.metrics import make_scorer, f1_score
svc = SVC(random_state=0)
bc = BaggingClassifier()
# Using cross validation to figure out the max depth for the Decision Tree Classifier
# Reference: http://blog.kaggle.com/2015/06/29/scikit-learn-video-7-optimizing-your-model-with-cross-validation/
all_scores = []
for md in range(1,11):
clf = DecisionTreeClassifier(max_depth=md, random_state=0)
scores = cross_val_score(clf, X_all, y_all, scoring=make_scorer(f1_score, pos_label='yes'))
all_scores.append((md, scores.mean(),))
print all_scores
# The maximum f1 score occurs for max_depth 1, so using it!
dtc = DecisionTreeClassifier(max_depth=1,random_state=0)
try_different_training_sizes(dtc)
try_different_training_sizes(bc)
try_different_training_sizes(svc)
# TODO: Fine-tune your model and report the best F1 score
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import make_scorer, f1_score
dt_parameters = {'max_depth':(3,2,1),}
clf = DecisionTreeClassifier(random_state=0)
classifier = GridSearchCV(clf,dt_parameters, scoring=make_scorer(f1_score, pos_label='yes'))
# Fit the learner to the training data to obtain the best parameter set
print "Final Model: "
train_classifier(classifier, X_train, y_train)
# pick the best classifier
classifier = classifier.best_estimator_
print "Best model: ", classifier
print predict_labels(classifier, X_test, y_test)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, can you find out the following facts about the dataset?
Step2: 3. Preparing the Data
Step3: Preprocess feature columns
Step4: Split data into training and test sets
Step5: 4. Training and Evaluating Models
Step6: ##### Decision Tree Classifier (with max_depth 1)
|
14,232
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
queries, documents = load_data()
assert type(queries) == list
assert type(documents) == list
tfidf = TfidfVectorizer()
tfidf.fit_transform(documents)
from sklearn.metrics.pairwise import cosine_similarity
cosine_similarities_of_queries = []
for query in queries:
query_tfidf = tfidf.transform([query])
cosine_similarities_of_queries.append(cosine_similarity(query_tfidf, tfidf.transform(documents)).flatten())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
14,233
|
<ASSISTANT_TASK:>
Python Code:
import pints
import pints.toy as toy
import pints.plot
import numpy as np
import matplotlib.pyplot as plt
# Use the toy logistic model
model = toy.LogisticModel()
real_parameters = [0.015, 500]
times = np.linspace(0, 1000, 100)
org_values = model.simulate(real_parameters, times)
# Add independent Gaussian noise
noise = 50
values = org_values + np.random.normal(0, noise, org_values.shape)
# Set up the problem and run the optimisation
problem = pints.SingleOutputProblem(model, times, values)
score = pints.SumOfSquaresError(problem)
boundaries = pints.RectangularBoundaries([0, 200], [1, 1000])
x0 = np.array([0.5, 500])
found_parameters, found_value = pints.optimise(
score,
x0,
boundaries=boundaries,
method=pints.XNES,
)
print('Score at true solution: ')
print(score(real_parameters))
print('Found solution: True parameters:' )
for k, x in enumerate(found_parameters):
print(pints.strfloat(x) + ' ' + pints.strfloat(real_parameters[k]))
fig, ax = pints.plot.series(np.array([found_parameters]), problem, ref_parameters=real_parameters)
fig.set_size_inches(15, 7.5)
plt.show()
from pints.residuals_diagnostics import plot_residuals_autocorrelation
# Plot the autocorrelation
fig = plot_residuals_autocorrelation(np.array([found_parameters]),
problem)
plt.show()
import pints.noise
# Use the toy logistic model
model = toy.LogisticModel()
real_parameters = [0.015, 500]
times = np.linspace(0, 1000, 100)
org_values = model.simulate(real_parameters, times)
# Add AR(1) noise
rho = 0.75
sigma = 50
values = org_values + pints.noise.ar1(rho, sigma, len(org_values))
# Set up the problem and run the optimisation
problem = pints.SingleOutputProblem(model, times, values)
score = pints.SumOfSquaresError(problem)
boundaries = pints.RectangularBoundaries([0, 200], [1, 1000])
x0 = np.array([0.5, 500])
found_parameters, found_value = pints.optimise(
score,
x0,
boundaries=boundaries,
method=pints.XNES,
)
print('Score at true solution: ')
print(score(real_parameters))
print('Found solution: True parameters:' )
for k, x in enumerate(found_parameters):
print(pints.strfloat(x) + ' ' + pints.strfloat(real_parameters[k]))
fig, ax = pints.plot.series(np.array([found_parameters]), problem, ref_parameters=real_parameters)
fig.set_size_inches(15, 7.5)
plt.show()
# Plot the autocorrelation
fig = plot_residuals_autocorrelation(np.array([found_parameters]),
problem)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
import pints
import pints.toy
model = pints.toy.LotkaVolterraModel()
times = np.linspace(0, 3, 50)
parameters = model.suggested_parameters()
model.set_initial_conditions([2, 2])
org_values = model.simulate(parameters, times)
# Add noise
sigma = 0.1
values = org_values + np.random.normal(0, sigma, org_values.shape)
# Create an object with links to the model and time series
problem = pints.MultiOutputProblem(model, times, values)
# Create a log posterior
log_prior = pints.UniformLogPrior([0, 0, 0, 0, 0, 0], [6, 6, 6, 6, 1, 1])
log_likelihood = pints.GaussianLogLikelihood(problem)
log_posterior = pints.LogPosterior(log_likelihood, log_prior)
# Run MCMC on the noisy data
x0 = [[1.0, 1.0, 1.0, 1.0, 0.1, 0.1]]*3
mcmc = pints.MCMCController(log_posterior, 3, x0)
mcmc.set_max_iterations(4000)
print('Running')
chains = mcmc.run()
print('Done!')
# Get the first MCMC chain
chain1 = chains[0]
# Cut off the burn-in samples
chain1 = chain1[2500:]
fig, ax = pints.plot.series(chain1, problem, ref_parameters=parameters)
fig.set_size_inches(15, 7.5)
plt.show()
# Plot the autocorrelation
fig = plot_residuals_autocorrelation(chain1, problem)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualisation of the data
Step2: Plotting autocorrelation of the residuals
Step3: The figure shows no significant autocorrelation in the residuals. Therefore, the assumption of independent noise may be valid.
Step4: Visualisation of the data
Step5: Now the autocorrelation plot of the residuals shows high autocorrelation at small lags, which is typical of AR(1) noise. Therefore, this visualisation suggests that the assumption of independent Gaussian noise which we made during inference is invalid.
Step6: Visualisation of the data
|
14,234
|
<ASSISTANT_TASK:>
Python Code:
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
%matplotlib inline
# config directory must have "__init__.py" file
# from the 'config' directory, import the following classes:
from config import Motor, ASI_Controller, Autosipper
from config import utils as ut
autosipper = Autosipper(Motor('config/motor.yaml'), ASI_Controller('config/asi_controller.yaml'))
autosipper.coord_frames
from config import gui
gui.stage_control(autosipper.XY, autosipper.Z)
# add/determine deck info
autosipper.coord_frames.deck.position_table = ut.read_delim_pd('config/position_tables/deck')
# check deck alignment
# CLEAR DECK OF OBSTRUCTIONS!!
autosipper.go_to('deck', ['name'],'align')
# add plate
from config import utils as ut
platemap = ut.generate_position_table((8,8),(9,9),93.5)
platemap[]
ut.lookup(platemap)
from config import Manifold
manifold = Manifold('192.168.1.3', 'config/valvemaps/valvemap.csv', 512)
manifold.valvemap[manifold.valvemap.name>0]
def valve_states():
    """Return a two-column DataFrame of (status, name) for valve indices
    2, 0, 14 and 8 on the module-level `manifold`.

    Status is 'o' when manifold.read_valve(i) is truthy, 'x' otherwise.
    """
    # NOTE(review): 'o' presumably means open -- confirm the semantics of
    # manifold.read_valve's return value against the Manifold class.
    tmp = []
    for i in [2,0,14,8]:
        status = 'x'
        if manifold.read_valve(i):
            status = 'o'
        tmp.append([status, manifold.valvemap.name.iloc[i]])
    return pd.DataFrame(tmp)
tmp = []
for i in range(16):
status = 'x'
if manifold.read_valve(i):
status = 'o'
name = manifold.valvemap.name.iloc[i]
tmp.append([status, name])
pd.DataFrame(tmp).replace(np.nan, '')
name = 'inlet_in'
v = manifold.valvemap['valve'][manifold.valvemap.name==name]
v=14
manifold.depressurize(v)
manifold.pressurize(v)
manifold.exit()
# !!!! Also must have MM folder on system PATH
# mm_version = 'C:\Micro-Manager-1.4'
# cfg = 'C:\Micro-Manager-1.4\SetupNumber2_05102016.cfg'
mm_version = 'C:\Program Files\Micro-Manager-2.0beta'
cfg = 'C:\Program Files\Micro-Manager-2.0beta\Setup2_20170413.cfg'
import sys
sys.path.insert(0, mm_version) # make it so python can find MMCorePy
import MMCorePy
from PIL import Image
core = MMCorePy.CMMCore()
core.loadSystemConfiguration(cfg)
core.setProperty("Spectra", "White_Enable", "1")
core.waitForDevice("Spectra")
core.setProperty("Cam Andor_Zyla4.2", "Sensitivity/DynamicRange", "16-bit (low noise & high well capacity)") # NEED TO SET CAMERA TO 16 BIT (ceiling 12 BIT = 4096)
core.setProperty("Spectra", "White_Enable", "0")
log = []
autosipper.Z.move(93.5)
manifold.depressurize(2)
manifold.depressurize(0)
log.append([time.ctime(time.time()), 'open inlet_in, inlet_out'])
valve_states()
text = 'fluorescence observed'
log.append([time.ctime(time.time()), text])
text = 'CLOSE inlet_out'
manifold.pressurize(0)
log.append([time.ctime(time.time()), text])
text = 'OPEN chip_in, chip_out'
manifold.depressurize(14)
manifold.depressurize(8)
log.append([time.ctime(time.time()), text])
valve_states()
text = 'fill'
log.append([time.ctime(time.time()), text])
manifold.pressurize(8)
#closed all
autosipper.Z.move(93.5)
manifold.depressurize(2)
manifold.depressurize(0)
log.append([time.ctime(time.time()), 'open inlet_in, inlet_out'])
valve_states()
text = 'fluorescence removed'
log.append([time.ctime(time.time()), text])
text = 'CLOSE inlet_out'
manifold.pressurize(0)
log.append([time.ctime(time.time()), text])
text = 'OPEN chip_in, chip_out'
manifold.depressurize(14)
manifold.depressurize(8)
log.append([time.ctime(time.time()), text])
valve_states()
text = 'flush'
log.append([time.ctime(time.time()), text])
manifold.pressurize(8)
for i in [2,0,14,8]:
manifold.pressurize(i)
log
core.setConfig('Channel','2_BF')
core.setProperty(core.getCameraDevice(), "Exposure", 20)
core.snapImage()
img = core.getImage()
plt.imshow(img,cmap='gray')
image = Image.fromarray(img)
# image.save('TESTIMAGE.tif')
position_list = ut.load_mm_positionlist("C:/Users/fordycelab/Desktop/D1_cjm.pos")
position_list
def acquire():
    """Snap and save one image at every position in the module-level
    `position_list`.

    For each row i: move the XY stage to (x, y), wait for the stage,
    snap an image, and save it as images/images_<i>.tif, logging each
    step via logadd into the module-level `log`.
    """
    for i in xrange(len(position_list)):
        si = str(i)
        x,y = position_list[['x','y']].iloc[i]
        core.setXYPosition(x,y)
        core.waitForDevice(core.getXYStageDevice())  # block until the stage settles
        logadd(log, 'moved '+si)
        core.snapImage()
        # core.waitForDevice(core.getCameraDevice())
        logadd(log, 'snapped '+si)
        img = core.getImage()
        logadd(log, 'got image '+si)
        image = Image.fromarray(img)
        image.save('images/images_{}.tif'.format(i))
        logadd(log, 'saved image '+si)
x,y = position_list[['x','y']].iloc[0]
core.setXYPosition(x,y)
core.waitForDevice(core.getXYStageDevice())
logadd(log, 'moved '+ str(0))
def logadd(log,st):
    """Append a [timestamp, message] entry to `log` and print it."""
    log.append([time.ctime(time.time()), st])
    print log[-1]
# Auto
core.setAutoShutter(True) # default
core.snapImage()
# Manual
core.setAutoShutter(False) # disable auto shutter
core.setProperty("Shutter", "State", "1")
core.waitForDevice("Shutter")
core.snapImage()
core.setProperty("Shutter", "State", "0")
core.getFocusDevice()
core.getCameraDevice()
core.XYStageDevice()
core.getDevicePropertyNames(core.getCameraDevice())
import cv2
from IPython import display
import numpy as np
from ipywidgets import widgets
import time
# core.initializeCircularBuffer()
# core.setCircularBufferMemoryFootprint(4096) # MiB
cv2.WND
# video with button (CV2)
live = widgets.Button(description='Live')
close = widgets.Button(description='Close')
display.display(widgets.HBox([live, close]))
def on_live_clicked(b):
    """Start continuous camera acquisition and stream frames to a CV2 window.

    Blocks until the user focuses the window and presses ESC, then stops
    the sequence acquisition.
    """
    display.clear_output(wait=True)
    print 'LIVE'
    core.startContinuousSequenceAcquisition(1000) # time overridden by exposure
    time.sleep(.2)
    cv2.namedWindow('Video', cv2.WINDOW_NORMAL)
    cv2.setWindowProperty('Video', cv2.WND_PROP_ASPECT_RATIO, cv2.WINDOW_KEEPRATIO)
    cv2.resizeWindow('Video', 500,500)
    img = np.zeros((500,500))  # placeholder frame until the buffer fills
    print 'To stop, click window + press ESC'
    while(1):
        time.sleep(.015)
        if core.getRemainingImageCount() > 0:
            img = core.getLastImage()  # keep showing the last frame otherwise
        cv2.imshow('Video',img)
        k = cv2.waitKey(30)
        if k==27: # ESC key; may need 255 mask?
            break
    print 'STOPPED'
    core.stopSequenceAcquisition()
def on_close_clicked(b):
    """Stop any running sequence acquisition and close the video window."""
    if core.isSequenceRunning():
        core.stopSequenceAcquisition()
    cv2.destroyWindow('Video')
live.on_click(on_live_clicked)
close.on_click(on_close_clicked)
# video with button (CV2)
# serial snap image
live = widgets.Button(description='Live')
close = widgets.Button(description='Close')
display.display(widgets.HBox([live, close]))
def on_live_clicked(b):
    """Live view via serial snapImage calls (no sequence acquisition).

    Redefines the handler above: each frame is a blocking snap/getImage
    pair. Runs until the user presses ESC in the window.
    """
    display.clear_output(wait=True)
    print 'LIVE'
    cv2.namedWindow('Video', cv2.WINDOW_NORMAL)
    cv2.setWindowProperty('Video', cv2.WND_PROP_ASPECT_RATIO, cv2.WINDOW_KEEPRATIO)
    cv2.resizeWindow('Video', 500,500)
    img = np.zeros((500,500))  # placeholder frame
    print 'To stop, click window + press ESC'
    while(1):
        core.snapImage()
        time.sleep(.05)
        img = core.getImage()
        cv2.imshow('Video',img)
        k = cv2.waitKey(30)
        if k==27: # ESC key; may need 255 mask?
            break
    print 'STOPPED'
print 'STOPPED'
def on_close_clicked(b):
    """Stop any running sequence acquisition and close the video window."""
    if core.isSequenceRunning():
        core.stopSequenceAcquisition()
    cv2.destroyWindow('Video')
live.on_click(on_live_clicked)
close.on_click(on_close_clicked)
cv2.destroyAllWindows()
# snap (CV2)
snap = widgets.Button(description='Snap')
close2 = widgets.Button(description='Close')
display.display(widgets.HBox([snap, close2]))
def on_snap_clicked(b):
    """Snap a single image and display it in a freshly created CV2 window."""
    cv2.destroyWindow('Snap')  # drop any stale window before re-creating it
    cv2.namedWindow('Snap',cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Snap', 500,500)
    cv2.setWindowProperty('Snap', cv2.WND_PROP_ASPECT_RATIO, cv2.WINDOW_KEEPRATIO)
    core.snapImage()
    time.sleep(.1)  # give the camera time to deliver the frame
    img = core.getImage()
    cv2.imshow('Snap',img)
    k = cv2.waitKey(30)
def on_close2_clicked(b):
    """Close the snapshot window."""
    cv2.destroyWindow('Snap')
snap.on_click(on_snap_clicked)
close2.on_click(on_close2_clicked)
autosipper.exit()
manifold.exit()
core.unloadAllDevices()
core.reset()
print 'closed'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Autosipper
Step2: Manifold
Step3: Micromanager
Step4: Preset
Step5: ACQUISITION
Step6: MM Get info
Step7: Video
Step8: SNAP CV2
Step9: EXIT
|
14,235
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import *
SVG('three_receiver_cal/pics/vnaBlockDiagramForwardRotated.svg')
ls three_receiver_cal/data/
import skrf as rf
%matplotlib inline
from pylab import *
rf.stylely()
raw = rf.read_all_networks('three_receiver_cal/data/')
# list the raw measurements
raw.keys()
thru = raw['thru']
thru
thru.plot_s_db()
from skrf.calibration import TwoPortOnePath
from skrf.media import RectangularWaveguide
from skrf import two_port_reflect as tpr
from skrf import mil
# pull frequency information from measurements
frequency = raw['short'].frequency
# the media object
wg = RectangularWaveguide(frequency=frequency, a=120*mil, z0=50)
# list of 'ideal' responses of the calibration standards
ideals = [wg.short(nports=2),
tpr(wg.delay_short( 90,'deg'), wg.match()),
wg.match(nports=2),
wg.thru()]
# corresponding measurements to the 'ideals'
measured = [raw['short'],
raw['quarter wave delay short'],
raw['load'],
raw['thru']]
# the Calibration object
cal = TwoPortOnePath(measured = measured, ideals = ideals )
Image('three_receiver_cal/pics/asymmetic DUT.jpg', width='75%')
from pylab import *
simulation = raw['simulation']
dutf = raw['wr15 shim and swg (forward)']
dutr = raw['wr15 shim and swg (reverse)']
corrected_full = cal.apply_cal((dutf, dutr))
corrected_partial = cal.apply_cal(dutf)
# plot results
f, ax = subplots(1,2, figsize=(8,4))
ax[0].set_title ('$S_{11}$')
ax[1].set_title ('$S_{21}$')
corrected_partial.plot_s_db(0,0, label='Partial Correction',ax=ax[0])
corrected_partial.plot_s_db(1,0, label='Partial Correction',ax=ax[1])
corrected_full.plot_s_db(0,0, label='Full Correction', ax = ax[0])
corrected_full.plot_s_db(1,0, label='Full Correction', ax = ax[1])
simulation.plot_s_db(0,0,label='Simulation', ax=ax[0], color='k')
simulation.plot_s_db(1,0,label='Simulation', ax=ax[1], color='k')
tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To fully correct an arbitrary two-port, the device must be measured in two orientations, call these forward and reverse. Because there is no switch present, this requires the operator to physically flip the device, and save the pair of measurements. In on-wafer scenarios, one could fabricate two identical devices, one in each orientation. In either case, a pair of measurements are required for each DUT before correction can occur.
Step2: These files can be read by scikit-rf into Networks with the following.
Step3: Each Network can be accessed through the dictionary raw.
Step4: If we look at the raw measurement of the flush thru, it can be seen that only $S_{11}$ and $S_{21}$ contain meaningful data. The other s-parameters are noise.
Step5: Create Calibration
Step6: Apply Correction
Step7: Full Correction ( TwoPortOnePath)
|
14,236
|
<ASSISTANT_TASK:>
Python Code:
fertility_df, life_expectancy_df, population_df_size, regions_df, years, regions_list = process_data()
sources = {}
region_name = regions_df.Group
region_name.name = 'region'
for year in years:
fertility = fertility_df[year]
fertility.name = 'fertility'
life = life_expectancy_df[year]
life.name = 'life'
population = population_df_size[year]
population.name = 'population'
new_df = pd.concat([fertility, life, population, region_name], axis=1)
sources['_' + str(year)] = ColumnDataSource(new_df)
dictionary_of_sources = dict(zip([x for x in years], ['_%s' % x for x in years]))
js_source_array = str(dictionary_of_sources).replace("'", "")
# Set up the plot
xdr = Range1d(1, 9)
ydr = Range1d(20, 100)
plot = Plot(
x_range=xdr,
y_range=ydr,
plot_width=800,
plot_height=400,
outline_line_color=None,
toolbar_location=None,
min_border=20,
)
AXIS_FORMATS = dict(
minor_tick_in=None,
minor_tick_out=None,
major_tick_in=None,
major_label_text_font_size="10pt",
major_label_text_font_style="normal",
axis_label_text_font_size="10pt",
axis_line_color='#AAAAAA',
major_tick_line_color='#AAAAAA',
major_label_text_color='#666666',
major_tick_line_cap="round",
axis_line_cap="round",
axis_line_width=1,
major_tick_line_width=1,
)
xaxis = LinearAxis(ticker=SingleIntervalTicker(interval=1), axis_label="Children per woman (total fertility)", **AXIS_FORMATS)
yaxis = LinearAxis(ticker=SingleIntervalTicker(interval=20), axis_label="Life expectancy at birth (years)", **AXIS_FORMATS)
plot.add_layout(xaxis, 'below')
plot.add_layout(yaxis, 'left')
# Add the year in background (add before circle)
text_source = ColumnDataSource({'year': ['%s' % years[0]]})
text = Text(x=2, y=35, text='year', text_font_size='150pt', text_color='#EEEEEE')
plot.add_glyph(text_source, text)
# Make a ColorMapper
color_mapper = CategoricalColorMapper(palette=Spectral6, factors=regions_list)
# Add the circle
renderer_source = sources['_%s' % years[0]]
circle_glyph = Circle(
x='fertility', y='life', size='population',
fill_color={'field': 'region', 'transform': color_mapper},
fill_alpha=0.8,
line_color='#7c7e71', line_width=0.5, line_alpha=0.5)
circle_renderer = plot.add_glyph(renderer_source, circle_glyph)
# Add the hover (only against the circle and not other plot elements)
tooltips = "@index"
plot.add_tools(HoverTool(tooltips=tooltips, renderers=[circle_renderer]))
# We want a legend for the circles. The legend will be populated based on the label='region'
# which is a column of the data source - it will take only the unique values.
plot.add_layout(Legend(items=[LegendItem(label='region', renderers=[circle_renderer])]))
# Add the slider
code =
var year = slider.value,
sources = %s,
new_source_data = sources[year].data;
renderer_source.data = new_source_data;
text_source.data = {'year': [String(year)]};
% js_source_array
callback = CustomJS(args=sources, code=code)
slider = Slider(start=years[0], end=years[-1], value=1, step=1, title="Year", callback=callback)
callback.args["renderer_source"] = renderer_source
callback.args["slider"] = slider
callback.args["text_source"] = text_source
# Stick the plot and the slider together
show(layout([[plot], [slider]], sizing_mode='scale_width'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: sources looks like this
Step2: Build the plot
Step3: Build the axes
Step4: Add the background year text
Step5: Add the bubbles and hover
Step7: Add the slider and callback
Step8: Render together with a slider
|
14,237
|
<ASSISTANT_TASK:>
Python Code:
# --- Setup for the DenseCRF segmentation demo ---
#import sys
#sys.path.insert(0,'/path/to/pydensecrf/')
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax, create_pairwise_bilateral
import numpy as np
import matplotlib.pyplot as plt
# IPython magic: render figures inline in the notebook.
%matplotlib inline
# Display images without interpolation and in grayscale by default.
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
from scipy.stats import multivariate_normal
# Synthetic 2-class softmax output: a centered Gaussian blob as the
# "foreground" probability, squeezed into [0.4, 0.6] so the unary term
# alone is deliberately weak/uncertain.
H, W, NLABELS = 400, 512, 2
# This creates a gaussian blob...
pos = np.stack(np.mgrid[0:H, 0:W], axis=2)
rv = multivariate_normal([H//2, W//2], (H//4)*(W//4))
probs = rv.pdf(pos)
# ...which we project into the range [0.4, 0.6]
probs = (probs-probs.min()) / (probs.max()-probs.min())
probs = 0.5 + 0.2 * (probs-0.5)
# The first dimension needs to be equal to the number of classes.
# Let's have one "foreground" and one "background" class.
# So replicate the gaussian blob but invert it to create the probability
# of the "background" class to be the opposite of "foreground".
probs = np.tile(probs[np.newaxis,:,:],(2,1,1))
probs[1,:,:] = 1 - probs[0,:,:]
# Let's have a look:
plt.figure(figsize=(15,5))
plt.subplot(1,2,1); plt.imshow(probs[0,:,:]); plt.title('Foreground probability'); plt.axis('off'); plt.colorbar();
plt.subplot(1,2,2); plt.imshow(probs[1,:,:]); plt.title('Background probability'); plt.axis('off'); plt.colorbar();
# Inference without pair-wise terms
U = unary_from_softmax(probs) # note: num classes is first dim
d = dcrf.DenseCRF2D(W, H, NLABELS)
d.setUnaryEnergy(U)
# Run inference for 10 iterations
Q_unary = d.inference(10)
# The Q is now the approximate posterior, we can get a MAP estimate using argmax.
map_soln_unary = np.argmax(Q_unary, axis=0)
# Unfortunately, the DenseCRF flattens everything, so get it back into picture form.
map_soln_unary = map_soln_unary.reshape((H,W))
# And let's have a look.
plt.imshow(map_soln_unary); plt.axis('off'); plt.title('MAP Solution without pairwise terms');
NCHAN=1
# Create simple image which will serve as bilateral.
# Note that we put the channel dimension last here,
# but we could also have it be the first dimension and
# just change the `chdim` parameter to `0` further down.
img = np.zeros((H,W,NCHAN), np.uint8)
img[H//3:2*H//3,W//4:3*W//4,:] = 1
plt.imshow(img[:,:,0]); plt.title('Bilateral image'); plt.axis('off'); plt.colorbar();
# Create the pairwise bilateral term from the above image.
# The two `s{dims,chan}` parameters are model hyper-parameters defining
# the strength of the location and image content bilaterals, respectively.
pairwise_energy = create_pairwise_bilateral(sdims=(10,10), schan=(0.01,), img=img, chdim=2)
# pairwise_energy now contains as many dimensions as the DenseCRF has features,
# which in this case is 3: (x,y,channel1)
img_en = pairwise_energy.reshape((-1, H, W)) # Reshape just for plotting
plt.figure(figsize=(15,5))
plt.subplot(1,3,1); plt.imshow(img_en[0]); plt.title('Pairwise bilateral [x]'); plt.axis('off'); plt.colorbar();
plt.subplot(1,3,2); plt.imshow(img_en[1]); plt.title('Pairwise bilateral [y]'); plt.axis('off'); plt.colorbar();
plt.subplot(1,3,3); plt.imshow(img_en[2]); plt.title('Pairwise bilateral [c]'); plt.axis('off'); plt.colorbar();
# Full CRF: unary term plus the pairwise bilateral potential. Inference is
# stepped manually so intermediate MAP solutions and KL divergence can be
# inspected (lower KL per pixel = closer to convergence).
d = dcrf.DenseCRF2D(W, H, NLABELS)
d.setUnaryEnergy(U)
d.addPairwiseEnergy(pairwise_energy, compat=10)  # `compat` is the "strength" of this potential.
# This time, let's do inference in steps ourselves
# so that we can look at intermediate solutions
# as well as monitor KL-divergence, which indicates
# how well we have converged.
# PyDenseCRF also requires us to keep track of two
# temporary buffers it needs for computations.
Q, tmp1, tmp2 = d.startInference()
for _ in range(5):
    d.stepInference(Q, tmp1, tmp2)
kl1 = d.klDivergence(Q) / (H*W)
map_soln1 = np.argmax(Q, axis=0).reshape((H,W))
for _ in range(20):
    d.stepInference(Q, tmp1, tmp2)
kl2 = d.klDivergence(Q) / (H*W)
map_soln2 = np.argmax(Q, axis=0).reshape((H,W))
for _ in range(50):
    d.stepInference(Q, tmp1, tmp2)
kl3 = d.klDivergence(Q) / (H*W)
map_soln3 = np.argmax(Q, axis=0).reshape((H,W))
# Snapshots are cumulative: 5, then 5+20=25... NOTE(review): titles below say
# 20 and 75 steps; actual cumulative counts are 25 and 75 -- confirm intent.
img_en = pairwise_energy.reshape((-1, H, W)) # Reshape just for plotting
plt.figure(figsize=(15,5))
plt.subplot(1,3,1); plt.imshow(map_soln1);
plt.title('MAP Solution with DenseCRF\n(5 steps, KL={:.2f})'.format(kl1)); plt.axis('off');
plt.subplot(1,3,2); plt.imshow(map_soln2);
plt.title('MAP Solution with DenseCRF\n(20 steps, KL={:.2f})'.format(kl2)); plt.axis('off');
plt.subplot(1,3,3); plt.imshow(map_soln3);
plt.title('MAP Solution with DenseCRF\n(75 steps, KL={:.2f})'.format(kl3)); plt.axis('off');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unary Potential
Step2: Run inference with unary potential
Step3: Pairwise terms
Step4: Run inference of complete DenseCRF
|
14,238
|
<ASSISTANT_TASK:>
Python Code:
# Demo: build averaged evoked responses from the Elekta/MEGIN DACQ averaging
# categories stored in a multimodal raw FIF file.
# Author: Jussi Nurminen (jnu@iki.fi)
#
# License: BSD (3-clause)
import mne
import os
from mne.datasets import multimodal
fname_raw = os.path.join(multimodal.data_path(), 'multimodal_raw.fif')
print(__doc__)
raw = mne.io.read_raw_fif(fname_raw)
# The acquisition parser exposes the averaging categories defined at
# acquisition time (DACQ settings).
print(raw.acqparser)
# Epoch a single named condition and plot its average as a topography.
cond = raw.acqparser.get_condition(raw, 'Auditory right')
epochs = mne.Epochs(raw, **cond)
epochs.average().plot_topo(background_color='w')
# Average every DACQ-defined category, keeping the category comment on each
# evoked for identification.
evokeds = []
for cat in raw.acqparser.categories:
    cond = raw.acqparser.get_condition(raw, cat)
    # copy (supported) rejection parameters from DACQ settings
    epochs = mne.Epochs(raw, reject=raw.acqparser.reject,
                        flat=raw.acqparser.flat, **cond)
    evoked = epochs.average()
    evoked.comment = cat['comment']
    evokeds.append(evoked)
# save all averages to an evoked fiff file
# fname_out = 'multimodal-ave.fif'
# mne.write_evokeds(fname_out, evokeds)
# Define a brand-new averaging category (DACQ-style dict) with a longer
# epoch window, then epoch and plot it.
newcat = dict()
newcat['comment'] = 'Visual lower left, longer epochs'
newcat['event'] = 3 # reference event
newcat['start'] = -.2 # epoch start rel. to ref. event (in seconds)
newcat['end'] = .7 # epoch end
newcat['reqevent'] = 0 # additional required event; 0 if none
newcat['reqwithin'] = .5 # ...required within .5 sec (before or after)
newcat['reqwhen'] = 2 # ...required before (1) or after (2) ref. event
newcat['index'] = 9 # can be set freely
cond = raw.acqparser.get_condition(raw, newcat)
epochs = mne.Epochs(raw, reject=raw.acqparser.reject,
                    flat=raw.acqparser.flat, **cond)
epochs.average().plot(time_unit='s')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read raw file
Step2: Check DACQ defined averaging categories and other info
Step3: Extract epochs corresponding to a category
Step4: Get epochs from all conditions, average
Step5: Make a new averaging category
|
14,239
|
<ASSISTANT_TASK:>
Python Code:
# Python 2 scraping exploration with pyquery.
# NOTE(review): `pq` (pyquery.PyQuery), `requests` and the `sampleurl*`
# variables are defined in earlier cells not included in this excerpt.
d = pq('<span><p class="hello">Hi</p><p>Bye</p></span>')
for each in d.children():
    print each.text
# Fetch a sample page and print the text of every <dt> element.
r = requests.get(sampleurl1)
r.raise_for_status()
r.content
blurb = pq(r.content)
for detail in blurb('dt'):
    print detail.text
#blurb().text()
r = requests.get(sampleurl2)
r.raise_for_status()
r.content
blurb = pq(r.content)
for detail in blurb('dt'):
    print detail.text
# sampleurl3 returns a fragment; wrap it so pyquery gets a single root.
r = requests.get(sampleurl3)
r.raise_for_status()
blurb = pq('<document>' + r.content + '</document>')
for detail in blurb('dt'):
    print detail.text
#print blurb.children()
# NOTE(review): lxml elements expose `.text` as an attribute -- calling
# `detail.text()` below raises TypeError; likely a leftover experiment.
for detail in blurb('dt'):
    print detail.text()
r = requests.get(sampleurl4)
r.raise_for_status()
#r.content
blurb = pq(r.content)
#print blurb.children().children()
for each in blurb.children().children():
    print each.text
#print blurb
#for detail in blurb('dt'):
#    print detail.text
#    print detail.keys
# Same extraction with BeautifulSoup: pair up <dt> (field names) with
# <dd> (field values) into a dict.
r = requests.get(sampleurl4)
r.raise_for_status()
soup = BeautifulSoup(r.content)
soup
dt_items = soup.find_all("dt")
dd_items = soup.find_all("dd")
for each in dt_items:
    print each.get_text()
for each in dd_items:
    print "ITEM:", each.get_text()
# First pass: strip newlines entirely from the <dd> values.
dt_texts = []
dd_texts = []
texts = {}
for each in dt_items:
    dt_texts.append(each.text)
for each in dd_items:
    dd_texts.append(each.text.replace('\n',''))
dt_texts
dd_texts
# Zip the two lists positionally into a dict (assumes dt/dd counts match).
texts = {}
for i in range(0, len(dt_texts)):
    texts[dt_texts[i]] = dd_texts[i]
texts
# Second pass: trim surrounding whitespace/newlines instead of deleting
# every newline.
dt_texts = []
dd_texts = []
for each in dt_items:
    dt_texts.append(each.text)
for each in dd_items:
    dd_texts.append(each.text.strip('\n').strip(' ').strip('\n'))
dd_texts
extended_stuff = {}
for i in range(0, len(dt_texts)):
    extended_stuff[dt_texts[i]] = dd_texts[i]
extended_stuff
# Post-process known fields: keep only the rating digit before the '/' for
# 'Effort', and flatten newlines in 'Weather'.
if 'Effort' in extended_stuff:
    effort_loc = extended_stuff['Effort'].find('/') - 1
    extended_stuff['Effort'] = extended_stuff['Effort'][effort_loc]
if 'Weather' in extended_stuff:
    extended_stuff['Weather'] = extended_stuff['Weather'].replace('\n',' ')
extended_stuff
# Experiments with locating the 'Gear' field via pyquery traversal.
# using sampleurl4
r = requests.get(sampleurl4)
r.raise_for_status()
#print r.content
blurb = pq(r.content)
#print blurb.contents()
for detail in blurb.children():
    if detail.text == 'Gear':
        print "GEAR!"
    else:
        print "not gear"
    #for item in detail.items():
    #print "ITEM:", detail.html()
    #print "BLEH:",blurb.items('dt').closest('dd')
    #print "ITEM:", detail.closest('dd')
    #print "ITEM:",detail.children()
#for detail in blurb('dt'):
#    if detail.text == 'Gear':
#        for each in detail.iterkeys():
#            print each
#            print "."
#print detail.keys()
# Final scrape of the page of interest.
# NOTE(review): `url` is not defined in this excerpt -- set in an earlier cell.
r = requests.get(url,timeout=5)
r.raise_for_status()
r.content
doc = pq(r.content)
doc('li.current-rating').text()
doc('ul.keyword_list').text()
doc('dt')
# Grab the first <span> text once the 'Gear' <dt> is seen.
gear = ''
for detail in doc('dt'):
    if detail.text == 'Gear':
        gear = doc.find('span').text()
gear
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sample
Step2: again
Step3: pyqyery get Details sample
|
14,240
| null | null |
14,241
|
<ASSISTANT_TASK:>
Python Code:
import pickle
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
data_dict = pickle.load(open("../final_project/final_project_dataset.pkl", "r") )
features_list = ["poi", "salary"]
data = featureFormat(data_dict, features_list)
labels, features = targetFeatureSplit(data)
from sklearn.tree import DecisionTreeClassifier
from sklearn.cross_validation import train_test_split
features_train, features_test, labels_train, labels_test = train_test_split(features,labels,test_size=0.3,random_state=42)
clf = DecisionTreeClassifier()
clf.fit(features_train,labels_train)
pred = clf.predict(features_test)
# ref http://stackoverflow.com/questions/10741346
import numpy as np
unique, counts = np.unique(labels_test, return_counts=True)
print "true labels"
print np.asarray((unique, counts)).T
print "predicted labels"
unique, counts = np.unique(pred, return_counts=True)
print np.asarray((unique, counts)).T
print "number of true positives:",sum((labels_test==1) & (pred ==1))
from sklearn.metrics import precision_score, recall_score
print "precision:",precision_score(labels_test,pred)
print "recall:",recall_score(labels_test,pred)
predictions = np.array([0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1])
true_labels = np.array([0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0])
print "number of true positives:",sum((true_labels==1) & (predictions==1))
print "number of false positives:",sum((true_labels==0) & (predictions==1))
print "number of true negatives:",sum((true_labels==0) & (predictions==0))
print "number of false negatives:",sum((true_labels==1) & (predictions==0))
print "precision:", 6/(6+3.)
print "recall:", 6/(6+2.)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Training a decision tree on this starter data
Step2: Counts of actual and predicted values
Step3: Which turn out to match up very poorly. No true positives. Just guessing 0 for everyone would in fact be more accurate.
Step4: Precision and Recall
Step5: These are not even slightly good news
Step6: Same thing with some fake data for comparison
|
14,242
|
<ASSISTANT_TASK:>
Python Code:
# Interactive exploration of the Quandl CBOE VVIX dataset in Quantopian
# Research (Blaze-backed expression, converted to pandas for plotting).
# For use in Quantopian Research, exploring interactively
from quantopian.interactive.data.quandl import cboe_vvix as dataset
# import data operations
from odo import odo
# import other libraries we will use
import pandas as pd
# Let's use blaze to understand the data a bit using Blaze dshape()
dataset.dshape
# And how many rows are there?
# N.B. we're using a Blaze function to do this, not len()
dataset.count()
# Let's see what the data looks like. We'll grab the first three rows.
dataset[:3]
# Plotting this DataFrame since 2007
df = odo(dataset, pd.DataFrame)
df.head(5)
# So we can plot it, we'll set the index as the `asof_date`
df['asof_date'] = pd.to_datetime(df['asof_date'])
df = df.set_index(['asof_date'])
df.head(5)
# Plotting the VVIX
import matplotlib.pyplot as plt
df.vvix.plot(label=str(dataset))
plt.ylabel(str(dataset))
plt.legend()
plt.title("Graphing %s since %s" % (str(dataset), min(df.index)))
# Import necessary Pipeline modules
from quantopian.pipeline import Pipeline
from quantopian.research import run_pipeline
from quantopian.pipeline.factors import AverageDollarVolume
# Import the datasets available
from quantopian.pipeline.data.quandl import cboe_vvix
# List the fields exposed by the dataset, then run a Pipeline that attaches
# the latest VVIX value to the 1000 most liquid securities. Python 2 prints.
print "Here are the list of available fields per dataset:"
print "---------------------------------------------------\n"
def _print_fields(dataset):
    # Print every column name and dtype of a Pipeline dataset.
    print "Dataset: %s\n" % dataset.__name__
    print "Fields:"
    for field in list(dataset.columns):
        print "%s - %s" % (field.name, field.dtype)
    print "\n"
_print_fields(cboe_vvix)
print "---------------------------------------------------\n"
pipe = Pipeline()
pipe.add(cboe_vvix.vvix.latest, 'vvix')
# Setting some basic liquidity strings (just for good habit)
dollar_volume = AverageDollarVolume(window_length=20)
top_1000_most_liquid = dollar_volume.rank(ascending=False) < 1000
# Keep only liquid names that actually have a VVIX value.
pipe.set_screen(top_1000_most_liquid & cboe_vvix.vvix.latest.notnan())
# The show_graph() method of pipeline objects produces a graph to show how it is being calculated.
pipe.show_graph(format='png')
# run_pipeline will show the output of your pipeline
pipe_output = run_pipeline(pipe, start_date='2013-11-01', end_date='2013-11-25')
pipe_output
# This section is only importable in the backtester
from quantopian.algorithm import attach_pipeline, pipeline_output
# General pipeline imports
from quantopian.pipeline import Pipeline
from quantopian.pipeline.factors import AverageDollarVolume
# For use in your algorithms via the pipeline API
from quantopian.pipeline.data.quandl import cboe_vvix
def make_pipeline():
    """Build the trading pipeline: latest VVIX value on liquid securities.

    Returns a Pipeline that adds the most recent `cboe_vvix.vvix` value under
    the 'vvix' column and screens to the 1000 securities with the highest
    20-day average dollar volume.
    """
    # Liquidity screen: rank by 20-day average dollar volume, keep the top 1000.
    adv_20 = AverageDollarVolume(window_length=20)
    liquid_screen = adv_20.rank(ascending=False) < 1000
    # Base universe mask (kept for parity with the research notebook).
    base_universe = liquid_screen
    pipeline = Pipeline()
    pipeline.add(cboe_vvix.vvix.latest, 'vvix')
    pipeline.set_screen(liquid_screen)
    return pipeline
def initialize(context):
    # Register the pipeline once at algorithm start-up.
    attach_pipeline(make_pipeline(), "pipeline")
def before_trading_start(context, data):
    # Pull the day's pipeline output before the market opens.
    results = pipeline_output('pipeline')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's go over the columns
Step2: <a id='pipeline'></a>
Step3: Now that we've imported the data, let's take a look at which fields are available for each dataset.
Step4: Now that we know what fields we have access to, let's see what this data looks like when we run it through Pipeline.
Step5: Here, you'll notice that each security is mapped to VVIX. So you could grab any security to obtain the value of VVIX.
|
14,243
|
<ASSISTANT_TASK:>
Python Code:
# Show the lab script's docs, its source, run it, then render the recorded
# HTTP exchanges as an HTML table.
help('learning_lab.03_interface_properties')
from importlib import import_module
script = import_module('learning_lab.03_interface_properties')
from inspect import getsource
print(getsource(script.main))
print(getsource(script.demonstrate))
# IPython magic (%run with automagic) -- not valid plain Python.
run ../learning_lab/03_interface_properties.py
from basics.odl_http import http_history
from basics.http import http_history_to_html
from IPython.core.display import HTML
HTML(http_history_to_html(http_history()))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Implementation
Step2: Execution
Step3: HTTP
|
14,244
|
<ASSISTANT_TASK:>
Python Code:
# Simplify factorial quotients written in LaTeX ("enunciado" = Spanish for
# "problem statement"). NOTE(review): `parse_latex`, `display`, `md`,
# `latex` and `simplify` are imported in earlier cells not shown here.
enunciado = list([r'\frac{7!}{6!}',r'\frac{{8!}}{{9!}}',r'\frac{{9!}}{{5!\cdot 4!}}',r'\frac{{m!}}{{(m - 1)!}}', r'\frac{{( {m + 1} )!}}{{( {m - 1} )!}}'])
enunciado
enunciado = list([r'\frac{7!}{6!}',r'\frac{{8!}}{{9!}}',r'\frac{{9!}}{{5!\cdot 4!}}',r'\frac{{m!}}{{(m - 1)!}}', r'\frac{{( {m + 1} )!}}{{( {m - 1} )!}}'])
enunciado_sympy=[]
for i in enunciado :
    enunciado_sympy.append(parse_latex(i));
enunciado_sympy
# Render each expression next to its simplified form.
for i in range(len(enunciado_sympy)) :
    display(md("$"+enunciado[i]+" \\rightarrow "+latex(simplify(enunciado_sympy[i]))+"$"))
# Binomial coefficients: nC(5,3) == C(5,3) == 10.
from sympy.functions.combinatorial.numbers import nC, nP, nT
nC(5,3)
from sympy import *
expr = sympify("nC(5,3)")
display(expr.expand())
# C(252,250) = 31626, C(25,3) = 2300, C(25,4) = 12650.
enunciado = [[252,250], [25,3], [25,4]]
for i in range(len(enunciado)):
    display(nC(enunciado[i][0],enunciado[i][1]))
nC(enunciado[0][0],enunciado[0][1])
# Cross-check: 252! / (250! * 2!) == C(252,250).
factorial(252)/(factorial(250)*factorial(2))
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Ejercicio
|
14,245
|
<ASSISTANT_TASK:>
Python Code:
# Geocoding post-processor for the air-crash crawler: looks up coordinates
# for crash locations and flight routes via the Google Geocoding API.
__author__ = 'shivam_gaur'
import requests
from bs4 import BeautifulSoup
import re
from pymongo import MongoClient
# Global Config Variables
# API key query-string fragment appended to every geocoding request.
client_key = '&key=<insert_your_39_character_api_key_here>'
# Geocoding endpoint (XML output); the address is appended to this prefix.
_URL_ = 'https://maps.googleapis.com/maps/api/geocode/xml?address='
# Running count of records that end up with lat/long data.
count = 0
# Shared HTTP-fetch helper (same as in the Flight Crash Data Crawler).
def makeBeautifulSoupObject(url):
    """Fetch *url* with connection retries and return it parsed by lxml."""
    # A Session lets us customize how `requests` performs HTTP requests:
    # retry failed connections up to 5 times for both schemes.
    http = requests.Session()
    retry_adapter = requests.adapters.HTTPAdapter(max_retries=5)
    for scheme in ("http://", "https://"):
        http.mount(scheme, retry_adapter)
    response = http.get(url=url)
    markup = response.text.encode('utf8')
    return BeautifulSoup(markup, "lxml")
# Connecting to Mongo instance (default host/port on this machine).
client = MongoClient()
# specify the name of the db in brackets
db = client['aircrashdb']
# specify the name of the collection in brackets
collection = db['crawled_data']
def Request_and_append(address_field):
    """Geocode *address_field* and store lat/long on the current record.

    Sends the '+'-joined address to the Google Geocoding API (XML) and writes
    the first <lat>/<lng> values onto the MongoDB document currently bound to
    the module-level ``cur`` (set by the crawl loops below). An IndexError
    escapes when the API returns no result, which the callers rely on to
    trigger their fallback address heuristics.
    """
    print (address_field)
    print ('\n')
    # BUG FIX: the original referenced the undefined name `client_key_sohail`;
    # the configured API-key global is `client_key`.
    finalurl = _URL_ + address_field + client_key
    soup = makeBeautifulSoupObject(finalurl)
    lat_ = soup.find_all('lat')
    long_ = soup.findAll('lng')
    # NOTE(review): relies on the global loop variable `cur` rather than a
    # parameter -- callers must set it before invoking this helper.
    collection.find_one_and_update({'_id':cur["_id"]},{'$set':{'geolat':lat_[0].string}})
    collection.find_one_and_update({'_id':cur["_id"]},{'$set':{'geolong':long_[0].string}})
    print (lat_[0].string + ' & ' + long_[0].string + ' - DONE. \n')
# Geocode the crash location of every record. Fallback strategy when the raw
# location string fails: (1) retry with the text after the last comma;
# (2) strip non-alphanumerics and scan for geographic keywords
# (coast/ocean/sea/off/persian/gulf), geocoding a keyword-anchored fragment;
# otherwise try the last token.
# for all the records in the collection
cursor = collection.find()
for cur in cursor:
    print(cur["loc"])
    if not cur["loc"] =='NULL':
        # if the latitude and logitude of aircrash location do not exist
        if not "geolat" in cur or not "geolong" in cur:
            try:
                if not cur['loc'] == 'NULL':
                    address_field = '+'.join(cur['loc'].split(' '))
                    Request_and_append(address_field)
                    count = count + 1
                else:
                    print ("NULL- No Route Field")
            except:
                # First fallback: geocode only the text after the last comma
                # (usually the country / largest region).
                print ("COULD NOT PROCESS " + cur['loc'].encode('utf-8'))
                new_attempt1 = cur['loc'].encode('utf-8').rpartition(',')[-1]
                print ('trying : ' + new_attempt1)
                try:
                    address_field = '+'.join(new_attempt1.encode('utf-8').strip().split(' '))
                    Request_and_append(address_field)
                except:
                    # Second fallback: keyword scan over the sanitized string.
                    print ('New attempt has failed as well')
                    new_attempt2 = cur['loc'].encode('utf-8')
                    new_attempt2 = re.sub('[^0-9a-zA-Z ]+', '', new_attempt2)
                    arr = new_attempt2.split()
                    try:
                        i=0
                        for s in arr:
                            if (s.lower() == 'coast'):
                                new_attempt_final = (arr [i-1] + ' ' + arr[i]).encode('utf-8')
                                address_field = '+'.join(new_attempt_final.encode('utf-8').strip().split(' '))
                                Request_and_append(address_field)
                                break
                            elif (s.lower() == 'ocean'):
                                new_attempt_final = (arr [i-1] + ' ' + arr[i]).encode('utf-8')
                                address_field = '+'.join(new_attempt_final.encode('utf-8').strip().split(' '))
                                Request_and_append(address_field)
                                break
                            elif (s.lower() == 'sea'):
                                new_attempt_final = (arr [i-1] + ' ' + arr[i]).encode('utf-8')
                                address_field = '+'.join(new_attempt_final.encode('utf-8').strip().split(' '))
                                Request_and_append(address_field)
                                break
                            elif (s.lower() == 'off'):
                                new_attempt_final = (' '.join(arr [i+1:])).encode('utf-8')
                                address_field = '+'.join(new_attempt_final.encode('utf-8').strip().split(' '))
                                Request_and_append(address_field)
                                break
                            elif (s.lower() == 'persian'): # For persian gulf
                                new_attempt_final = (arr [i] + ' ' + arr[i+1]).encode('utf-8')
                                address_field = '+'.join(new_attempt_final.encode('utf-8').strip().split(' '))
                                Request_and_append(address_field)
                                break
                            elif (s.lower() == 'gulf'):
                                new_attempt_final = (arr [i] + ' ' + arr[i+1]+ ' ' + arr[i+2]).encode('utf-8')
                                address_field = '+'.join(new_attempt_final.encode('utf-8').strip().split(' '))
                                Request_and_append(address_field)
                                break
                            else:
                                # No keyword at this position: try the last token
                                # and keep scanning (no break).
                                new_attempt_final = arr [-1]
                                address_field = '+'.join(new_attempt_final.encode('utf-8').strip().split(' '))
                                Request_and_append(address_field)
                            i = i+1
                        i=0
                    except:
                        print ("I AM SORRY, THIS LOCATION CANNOT BE PROCESSED")
        else:
            # if the latitude and logitude of aircrash location ALREADY EXIST. This is in case this code block is run multiple times.
            count = count + 1
            print (cur['loc'].encode('utf-8')+' - ALREADY PROCESSED')
    else:
        print("ROUTE ===== NULL")
print (" TOTAL RECORDS THAT HAVE LATS AND LONGS: " + str(count))
# Geocode the source and destination airports of each flight route
# ("SRC - ... - DST") and store their coordinates on the record.
# BUG FIX: the original iterated the `cursor` already exhausted by the
# location loop above, so this loop never ran; open a fresh cursor.
cursor = collection.find()
counter = 0
for cur in cursor:
    print(cur["route"])
    if not cur["route"]=='NULL':
        if not "srclat" in cur and not "srclong" in cur or not "deslat" in cur and not "deslong" in cur:
            try:
                if not cur['route'] == 'NULL':
                    # First and last '-'-separated segments are origin/destination.
                    source_dest = cur["route"].split('-')
                    source_dest[0] = source_dest[0].strip()
                    source_dest[-1] = source_dest[-1].strip()
                    address_field1 = ' '.join(source_dest[0].split(' '))
                    print (address_field1)
                    address_field2 = ' '.join(source_dest[-1].split(' '))
                    print (address_field2)
                    print ('\n')
                    # BUG FIX: the original used the undefined names `url` and
                    # `client_key_sohail`; the configured globals are `_URL_`
                    # and `client_key` (same pattern as Request_and_append).
                    finalurl1 = _URL_ + address_field1 + client_key
                    finalurl2 = _URL_ + address_field2 + client_key
                    soup1 = makeBeautifulSoupObject(finalurl1)
                    soup2 = makeBeautifulSoupObject(finalurl2)
                    srclat = soup1.find_all('lat')
                    srclong = soup1.findAll('lng')
                    deslat = soup2.find_all('lat')
                    deslong = soup2.find_all('lng')
                    collection.find_one_and_update({'_id':cur["_id"]},{'$set':{'srclat':srclat[0].string}})
                    collection.find_one_and_update({'_id':cur["_id"]},{'$set':{'srclong':srclong[0].string}})
                    collection.find_one_and_update({'_id':cur["_id"]},{'$set':{'deslat':deslat[0].string}})
                    collection.find_one_and_update({'_id':cur["_id"]},{'$set':{'deslong':deslong[0].string}})
                    print (srclat[0].string)
                    print (srclong[0].string)
                    print (deslat[0].string)
                    print (deslong[0].string)
                    counter = counter +1
                else:
                    print ("NULL- No Route Field")
            except:
                print ("COULD NOT PROCESS " + cur['route'].encode('utf-8'))
        else:
            print ("ALREADY PROCESSED: " + cur['route'].encode('utf-8'))
            counter = counter +1
    else:
        print("ROUTE == NULL")
print ('TOTAL COUNTER: ' + str(counter))
# Export geocoded crash records to XML for the web-app visualisation:
# one <element> per crash with <date>, <lat>, <long> and <fatal>.
# Importing the required libraries
from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import Element
import xml.etree.ElementTree as etree
import xml.dom.minidom
# BUG FIX: the original iterated the already-exhausted `cursor` from the
# crawl loops above, producing an empty document; open a fresh cursor.
cursor = collection.find()
root = Element('root')
tree = ElementTree(root)
for cur in cursor:
    if "geolat" in cur and "geolong" in cur:
        element = Element('element')
        root.append(element)
        date = Element('date')
        date.text= str(cur['date'])
        element.append(date)
        lat = Element('lat')
        lat.text= cur['geolat']
        element.append(lat)
        lng = Element('long')
        lng.text= cur['geolong']
        element.append(lng)
        # Total fatalities = on-board + on-ground when both are known;
        # otherwise fall back to whichever value is present.
        fatal = Element('fatal')
        if not cur['fatalities_total'] == 'NULL' and not cur['ground'] == 'NULL':
            total_fatalities = int(cur['fatalities_total']) + int(cur['ground'])
            fatal.text= str(total_fatalities)
        elif cur['fatalities_total'] == 'NULL':
            fatal.text= cur['ground']
        elif cur['ground'] == 'NULL':
            fatal.text= cur['fatalities_total']
        else:
            fatal.text= cur['fatalities_total']
        element.append(fatal)
# BUG FIX: the original bound the parsed document to a variable named `xml`,
# shadowing the `xml` package and breaking the next cell's use of
# `xml.dom.minidom`; use a distinct name.
dom = xml.dom.minidom.parseString(etree.tostring(root))
pretty_xml_as_string = dom.toprettyxml()
print (pretty_xml_as_string)
with open(r'C:\Users\admin\Desktop\GE2324\crash_location_data_with_total_fatal.xml', "wb") as f:
    f.write(pretty_xml_as_string.encode('utf-8'))
# Export source/destination coordinates of each geocoded route to XML.
cursor = collection.find()
root = Element('root')
tree = ElementTree(root)
for cur in cursor:
    if "srclat" in cur and "srclong" in cur and "deslat" in cur and "deslong" in cur:
        element = Element('element')
        root.append(element)
        srclat = Element('srclat')
        srclat.text= cur['srclat']
        element.append(srclat)
        srclong = Element('srclong')
        srclong.text= cur['srclong']
        element.append(srclong)
        deslat = Element('deslat')
        deslat.text= cur['deslat']
        element.append(deslat)
        deslong = Element('deslong')
        deslong.text= cur['deslong']
        element.append(deslong)
# BUG FIX: the previous cell rebound the name `xml` to a parsed Document, so
# `xml.dom.minidom.parseString(...)` raised AttributeError here. Import the
# minidom module under its own name so this cell works regardless.
from xml.dom import minidom
dom = minidom.parseString(etree.tostring(root))
pretty_xml_as_string = dom.toprettyxml()
print (pretty_xml_as_string)
with open('route_data.xml', "wb") as f:
    f.write(pretty_xml_as_string.encode('utf-8'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Declaring the important helper functions and global variables
Step2: Connecting to the Mongo DB client running on the same machine.
Step3: Helper Function to send request (to url- address_field) and append to the MongoDB collection
Step4: Extracting latitude and longitude data of the aircrash location, and appending to the MongoDB collection
Step5: Extracting latitude and longitude data of the source and destination, and appending to the MongoDB collection
Step6: Code for churning out XML files, that are used for visualisation purposes on the web app.
Step7: Generating XML File with the following Schema
Step8: This is what the XML should look like
|
14,246
|
<ASSISTANT_TASK:>
Python Code:
# setup SymPy
from sympy import *
x, y, z, t = symbols('x y z t')
init_printing()
# define the matrices A and B, and the vecs v and w
A = Matrix([[1,3],
[4,5]])
B = Matrix([[-1,0],
[ 3,3]])
v = Matrix([[1,2]]).T # the .T makes v a column vector
w = Matrix([[-3,-4]]).T
# a)
A*v
# b)
B*v
# c)
A*B*v
# d)
B*A*v
# e)
A*w
# f)
B*w
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Definitions
|
14,247
|
<ASSISTANT_TASK:>
Python Code:
def pretty_print_review_and_label(i):
    """Print the label and the first 80 characters of review number *i*."""
    snippet = reviews[i][:80]
    print("{}\t:\t{}...".format(labels[i], snippet))
# Load the raw IMDB reviews and their sentiment labels; one item per line,
# trailing newline stripped, labels upper-cased to POSITIVE/NEGATIVE.
g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
len(reviews)
reviews[0]
labels[0]
# Show a few labelled examples side by side.
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
# Exercise scaffolding (TODOs intentionally left for the student): count word
# frequencies per sentiment class and compute positive/negative usage ratios.
# Until the TODOs are completed the counters are empty and the ratios print 0.
from collections import Counter
import numpy as np
# Create three Counter objects to store positive, negative and total counts
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
# TODO: Loop over all the words in all the reviews and increment the counts in the appropriate counter objects
# Examine the counts of the most common words in positive reviews
positive_counts.most_common()
# Examine the counts of the most common words in negative reviews
negative_counts.most_common()
# Create Counter object to store positive/negative ratios
pos_neg_ratios = Counter()
# TODO: Calculate the ratios of positive and negative uses of the most common words
# Consider words to be "common" if they've been used at least 100 times
print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"]))
print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"]))
print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"]))
# TODO: Convert ratios to logs
print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"]))
print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"]))
print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"]))
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
# Note: Above is the code Andrew uses in his solution video,
# so we've included it here to avoid confusion.
# If you explore the documentation for the Counter class,
# you will see you could also find the 30 least common
# words like this: pos_neg_ratios.most_common()[:-31:-1]
# Diagrams plus exercise placeholders for the input layer. NOTE(review):
# `vocab = None` and `layer_0 = None` are intentional stubs -- the
# `len(vocab)`, `layer_0.shape` and `enumerate(vocab)` lines below raise
# until the TODOs are completed.
from IPython.display import Image
review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')
review = "The movie was excellent"
Image(filename='sentiment_network_pos.png')
# TODO: Create set named "vocab" containing all of the words from all of the reviews
vocab = None
vocab_size = len(vocab)
print(vocab_size)
from IPython.display import Image
Image(filename='sentiment_network_2.png')
# TODO: Create layer_0 matrix with dimensions 1 by vocab_size, initially filled with zeros
layer_0 = None
layer_0.shape
from IPython.display import Image
Image(filename='sentiment_network.png')
# Create a dictionary of words in the vocabulary mapped to index positions
# (to be used in layer_0)
word2index = {}
for i,word in enumerate(vocab):
    word2index[word] = i
# display the map of words to indices
word2index
def update_input_layer(review):
    """Modify the global layer_0 to represent the vector form of review.

    The element at a given index of layer_0 should represent
    how many times the given word occurs in the review.

    Args:
        review(string) - the string of the review
    Returns:
        None
    """
    # BUG FIX: the docstring above lost its triple quotes during extraction,
    # leaving bare prose lines that made this function a syntax error.
    global layer_0
    # clear out previous state by resetting the layer to be all 0s
    layer_0 *= 0
    # TODO: count how many times each word is used in the given review and store the results in layer_0
# Populate layer_0 from the first review and display it (requires the
# layer_0 TODO above to be completed first).
update_input_layer(reviews[0])
layer_0
def get_target_for_label(label):
    """Convert a label to `0` or `1`.

    Args:
        label(string) - Either "POSITIVE" or "NEGATIVE".
    Returns:
        `0` or `1`.
    """
    # BUG FIX: the docstring above lost its triple quotes during extraction,
    # making this function a syntax error. The body is still the student's
    # exercise; until implemented it returns None.
    # TODO: Your code here
# Spot-check the label conversion on the first two labels (expected 1 for
# POSITIVE and 0 for NEGATIVE once the TODO is implemented).
labels[0]
get_target_for_label(labels[0])
labels[1]
get_target_for_label(labels[1])
import time
import sys
import numpy as np
# Encapsulate our neural network in a class
class SentimentNetwork:
    """A 3-layer neural network (input -> hidden -> output) that classifies
    movie reviews as POSITIVE or NEGATIVE from bag-of-word counts."""

    def __init__(self, reviews, labels, hidden_nodes=10, learning_rate=0.1):
        """Create a SentimentNetwork with the given settings

        Args:
            reviews(list) - List of reviews used for training
            labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews
            hidden_nodes(int) - Number of nodes to create in the hidden layer
            learning_rate(float) - Learning rate to use while training
        """
        # Assign a seed to our random number generator to ensure we get
        # reproducible results during development
        np.random.seed(1)

        # process the reviews and their associated labels so that everything
        # is ready for training
        self.pre_process_data(reviews, labels)

        # Build the network to have the number of hidden nodes and the learning rate that
        # were passed into this initializer. Make the same number of input nodes as
        # there are vocabulary words and create a single output node.
        self.init_network(len(self.review_vocab), hidden_nodes, 1, learning_rate)

    def pre_process_data(self, reviews, labels):
        """Build word/label vocabularies and their index lookup tables."""
        # populate review_vocab with all of the words in the given reviews
        review_vocab = set()
        for review in reviews:
            for word in review.split(" "):
                review_vocab.add(word)
        # Convert the vocabulary set to a list so we can access words via indices
        self.review_vocab = list(review_vocab)

        # populate label_vocab with all of the words in the given labels
        label_vocab = set()
        for label in labels:
            label_vocab.add(label)
        # Convert the label vocabulary set to a list so we can access labels via indices
        self.label_vocab = list(label_vocab)

        # Store the sizes of the review and label vocabularies.
        self.review_vocab_size = len(self.review_vocab)
        self.label_vocab_size = len(self.label_vocab)

        # Create a dictionary of words in the vocabulary mapped to index positions
        self.word2index = {}
        for i, word in enumerate(self.review_vocab):
            self.word2index[word] = i

        # Create a dictionary of labels mapped to index positions
        self.label2index = {}
        for i, label in enumerate(self.label_vocab):
            self.label2index[label] = i

    def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Allocate the weight matrices and the reusable input layer."""
        # Store the number of nodes in input, hidden, and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Store the learning rate
        self.learning_rate = learning_rate

        # Weights between the input layer and the hidden layer start at zero.
        self.weights_0_1 = np.zeros((self.input_nodes, self.hidden_nodes))

        # Weights between the hidden layer and the output layer start as
        # small random values.
        self.weights_1_2 = np.random.normal(0.0, self.output_nodes ** -0.5,
                                            (self.hidden_nodes, self.output_nodes))

        # The input layer: a two-dimensional matrix with shape 1 x input_nodes,
        # all values initialized to zero.
        self.layer_0 = np.zeros((1, input_nodes))

    def update_input_layer(self, review):
        """Fill self.layer_0 with word counts for the given review."""
        # clear out previous state by resetting the layer to be all 0s
        self.layer_0 *= 0
        for word in review.split(" "):
            # skip words that never appeared in the training vocabulary
            if word in self.word2index:
                self.layer_0[0][self.word2index[word]] += 1

    def get_target_for_label(self, label):
        """Convert a label to `0` or `1`."""
        return 1 if label == 'POSITIVE' else 0

    def sigmoid(self, x):
        """Sigmoid activation function."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_output_2_derivative(self, output):
        """Derivative of the sigmoid, given the sigmoid's original output."""
        return output * (1 - output)

    def train(self, training_reviews, training_labels):
        """Run one pass of stochastic gradient descent over the training data."""
        # make sure we have a matching number of reviews and labels
        assert (len(training_reviews) == len(training_labels))

        # Keep track of correct predictions to display accuracy during training
        correct_so_far = 0

        # Remember when we started for printing time statistics
        start = time.time()

        # loop through all the given reviews and run a forward and backward pass,
        # updating weights for every item
        for i in range(len(training_reviews)):
            review = training_reviews[i]
            label = training_labels[i]

            ### Forward pass ###
            self.update_input_layer(review)
            # hidden layer has no activation function
            layer_1 = self.layer_0.dot(self.weights_0_1)
            # output layer uses the sigmoid activation
            layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))

            ### Backward pass ###
            layer_2_error = layer_2 - self.get_target_for_label(label)
            layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)

            layer_1_error = layer_2_delta.dot(self.weights_1_2.T)
            layer_1_delta = layer_1_error  # no nonlinearity on the hidden layer

            # gradient-descent weight updates
            self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate
            self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate

            # a prediction counts as correct when the absolute output error < 0.5
            if abs(layer_2_error) < 0.5:
                correct_so_far += 1

            # For debug purposes, print out our prediction accuracy and speed
            # throughout the training process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4]
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5]
                             + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1)
                             + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
            if (i % 2500 == 0):
                print("")

    def test(self, testing_reviews, testing_labels):
        """Attempts to predict the labels for the given testing_reviews,
        and uses the test_labels to calculate the accuracy of those predictions."""
        # keep track of how many correct predictions we make
        correct = 0

        # we'll time how many predictions per second we make
        start = time.time()

        # Loop through each of the given reviews and call run to predict
        # its label.
        for i in range(len(testing_reviews)):
            pred = self.run(testing_reviews[i])
            if (pred == testing_labels[i]):
                correct += 1

            # For debug purposes, print out our prediction accuracy and speed
            # throughout the prediction process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4]
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5]
                             + " #Correct:" + str(correct) + " #Tested:" + str(i+1)
                             + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")

    def run(self, review):
        """Returns a POSITIVE or NEGATIVE prediction for the given review."""
        # the review may come from anywhere, so lower-case it before look-ups
        self.update_input_layer(review.lower())
        layer_1 = self.layer_0.dot(self.weights_0_1)
        layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
        # predictions greater-than-or-equal-to 0.5 are POSITIVE, else NEGATIVE
        if layer_2[0] >= 0.5:
            return "POSITIVE"
        else:
            return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.test(reviews[-1000:],labels[-1000:])
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)
mlp.train(reviews[:-1000],labels[:-1000])
from IPython.display import Image
Image(filename='sentiment_network.png')
def update_input_layer(review):
    """Count each word of `review` into the global layer_0 vector."""
    global layer_0
    # clear out previous state, reset the layer to be all 0s
    layer_0 *= 0
    for word in review.split(" "):
        layer_0[0][word2index[word]] += 1
update_input_layer(reviews[0])
layer_0
# Count how often each word appears in the first review
review_counter = Counter()
for word in reviews[0].split(" "):
    review_counter[word] += 1
review_counter.most_common()
# TODO: -Copy the SentimentNetwork class from Projet 3 lesson
# -Modify it to reduce noise, like in the video
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
mlp.test(reviews[-1000:],labels[-1000:])
Image(filename='sentiment_network_sparse.png')
layer_0 = np.zeros(10)
layer_0
layer_0[4] = 1
layer_0[9] = 1
layer_0
weights_0_1 = np.random.randn(10,5)
layer_0.dot(weights_0_1)
indices = [4,9]
layer_1 = np.zeros(5)
for index in indices:
layer_1 += (1 * weights_0_1[index])
layer_1
Image(filename='sentiment_network_sparse_2.png')
layer_1 = np.zeros(5)
for index in indices:
layer_1 += (weights_0_1[index])
layer_1
# TODO: -Copy the SentimentNetwork class from Project 4 lesson
# -Modify it according to the above instructions
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
mlp.test(reviews[-1000:],labels[-1000:])
Image(filename='sentiment_network_sparse_2.png')
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure, show, output_file
from bokeh.io import output_notebook
output_notebook()
# BUG FIX: `normed` is deprecated and removed in modern NumPy (>=1.24);
# passing both `density=True` and `normed=True` raises — keep only `density`.
hist, edges = np.histogram(list(map(lambda x: x[1], pos_neg_ratios.most_common())),
                           density=True, bins=100)

p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="Word Positive/Negative Affinity Distribution")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)
# Tally how many distinct words occur with each total count
frequency_frequency = Counter()
for word, cnt in total_counts.most_common():
    frequency_frequency[cnt] += 1

# BUG FIX: dropped the removed `normed=True` argument (conflicts with density=True)
hist, edges = np.histogram(list(map(lambda x: x[1], frequency_frequency.most_common())),
                           density=True, bins=100)

p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="The frequency distribution of the words in our corpus")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)
# TODO: -Copy the SentimentNetwork class from Project 5 lesson
# -Modify it according to the above instructions
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.05,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
mlp.test(reviews[-1000:],labels[-1000:])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.8,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
mlp.test(reviews[-1000:],labels[-1000:])
mlp_full = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=0,polarity_cutoff=0,learning_rate=0.01)
mlp_full.train(reviews[:-1000],labels[:-1000])
Image(filename='sentiment_network_sparse.png')
def get_most_similar_words(focus="horrible"):
    """Rank every vocabulary word by the dot-product similarity of its
    input-weight vector with that of `focus` (uses the global mlp_full)."""
    most_similar = Counter()
    for word in mlp_full.word2index.keys():
        most_similar[word] = np.dot(mlp_full.weights_0_1[mlp_full.word2index[word]],
                                    mlp_full.weights_0_1[mlp_full.word2index[focus]])
    return most_similar.most_common()
get_most_similar_words("excellent")
get_most_similar_words("terrible")
import matplotlib.colors as colors

# Collect the 500 most positive and 500 most negative words that the
# trained network actually knows about.
words_to_visualize = list()
for word, ratio in pos_neg_ratios.most_common(500):
    if (word in mlp_full.word2index.keys()):
        words_to_visualize.append(word)
for word, ratio in list(reversed(pos_neg_ratios.most_common()))[0:500]:
    if (word in mlp_full.word2index.keys()):
        words_to_visualize.append(word)

pos = 0
neg = 0

colors_list = list()
vectors_list = list()
# NOTE(review): vectors/colors are only appended for words present in
# pos_neg_ratios, while `names` below uses all of words_to_visualize —
# if any word is missing from pos_neg_ratios the lists would mismatch.
# TODO confirm against the data; behavior kept as in the original.
for word in words_to_visualize:
    if word in pos_neg_ratios.keys():
        vectors_list.append(mlp_full.weights_0_1[mlp_full.word2index[word]])
        if (pos_neg_ratios[word] > 0):
            pos += 1
            colors_list.append("#00ff00")
        else:
            neg += 1
            colors_list.append("#000000")

from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0)
words_top_ted_tsne = tsne.fit_transform(vectors_list)

p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="vector T-SNE for most polarized words")

source = ColumnDataSource(data=dict(x1=words_top_ted_tsne[:, 0],
                                    x2=words_top_ted_tsne[:, 1],
                                    names=words_to_visualize,
                                    color=colors_list))

p.scatter(x="x1", y="x2", size=8, source=source, fill_color="color")

word_labels = LabelSet(x="x1", y="x2", text="names", y_offset=6,
                       text_font_size="8pt", text_color="#555555",
                       source=source, text_align='center')
p.add_layout(word_labels)

show(p)
# green indicates positive words, black indicates negative words
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: Lesson
Step3: Project 1
Step4: We'll create three Counter objects, one for words from positive reviews, one for words from negative reviews, and one for all the words.
Step5: TODO
Step6: Run the following two cells to list the words used in positive reviews and negative reviews, respectively, ordered from most to least commonly used.
Step7: As you can see, common words like "the" appear very often in both positive and negative reviews. Instead of finding the most common words in positive or negative reviews, what you really want are the words found in positive reviews more often than in negative reviews, and vice versa. To accomplish this, you'll need to calculate the ratios of word usage between positive and negative reviews.
Step8: Examine the ratios you've calculated for a few words
Step9: Looking closely at the values you just calculated, we see the following
Step10: Examine the new ratios you've calculated for the same words from before
Step11: If everything worked, now you should see neutral words with values close to zero. In this case, "the" is near zero but slightly positive, so it was probably used in more positive reviews than negative reviews. But look at "amazing"'s ratio - it's above 1, showing it is clearly a word with positive sentiment. And "terrible" has a similar score, but in the opposite direction, so it's below -1. It's now clear that both of these words are associated with specific, opposing sentiments.
Step12: End of Project 1.
Step13: Project 2
Step14: Run the following cell to check your vocabulary size. If everything worked correctly, it should print 74074
Step15: Take a look at the following image. It represents the layers of the neural network you'll be building throughout this notebook. layer_0 is the input layer, layer_1 is a hidden layer, and layer_2 is the output layer.
Step16: TODO
Step17: Run the following cell. It should display (1, 74074)
Step18: layer_0 contains one entry for every word in the vocabulary, as shown in the above image. We need to make sure we know the index of each word, so run the following cell to create a lookup table that stores the index of every word.
Step20: TODO
Step21: Run the following cell to test updating the input layer with the first review. The indices assigned may not be the same as in the solution, but hopefully you'll see some non-zero values in layer_0.
Step23: TODO
Step24: Run the following two cells. They should print out 'POSITIVE' and 1, respectively.
Step25: Run the following two cells. They should print out 'NEGATIVE' and 0, respectively.
Step29: End of Project 2.
Step30: Run the following cell to create a SentimentNetwork that will train on all but the last 1000 reviews (we're saving those for testing). Here we use a learning rate of 0.1.
Step31: Run the following cell to test the network's performance against the last 1000 reviews (the ones we held out from our training set).
Step32: Run the following cell to actually train the network. During training, it will display the model's accuracy repeatedly as it trains so you can see how well it's doing.
Step33: That most likely didn't train very well. Part of the reason may be because the learning rate is too high. Run the following cell to recreate the network with a smaller learning rate, 0.01, and then train the new network.
Step34: That probably wasn't much different. Run the following cell to recreate the network one more time with an even smaller learning rate, 0.001, and then train the new network.
Step35: With a learning rate of 0.001, the network should finally have started to improve during training. It's still not very good, but it shows that this solution has potential. We will improve it in the next lesson.
Step36: Project 4
Step37: Run the following cell to recreate the network and train it. Notice we've gone back to the higher learning rate of 0.1.
Step38: That should have trained much better than the earlier attempts. It's still not wonderful, but it should have improved dramatically. Run the following cell to test your model with 1000 predictions.
Step39: End of Project 4.
Step40: Project 5
Step41: Run the following cell to recreate the network and train it once again.
Step42: That should have trained much better than the earlier attempts. Run the following cell to test your model with 1000 predictions.
Step43: End of Project 5.
Step44: Project 6
Step45: Run the following cell to train your network with a small polarity cutoff.
Step46: And run the following cell to test its performance. It should be
Step47: Run the following cell to train your network with a much larger polarity cutoff.
Step48: And run the following cell to test it's performance.
Step49: End of Project 6.
|
14,248
|
<ASSISTANT_TASK:>
Python Code:
# import Python's regular-expression module
import re

# compile the regex
patron = re.compile(r'\bfoo\b')  # searches for the word foo

# input text (reconstructed triple-quoted multi-line string)
texto = """bar foo bar
foo barbarfoo
foofoo foo bar"""

# match returns None because there was no match at the start of the text
print(patron.match(texto))

# match finds a hit at the start of this string
m = patron.match('foo bar')
m

# search returns the match at any location
s = patron.search(texto)
s

# findall returns a list with every match
fa = patron.findall(texto)
fa

# finditer returns an iterator of match objects
fi = patron.finditer(texto)
fi

# stepping through the matches
next(fi)
next(fi)

# methods of the match object
m.group(), m.start(), m.end(), m.span()
s.group(), s.start(), s.end(), s.span()
# input text (reconstructed triple-quoted multi-line string)
becquer = """Podrá nublarse el sol eternamente;
Podrá secarse en un instante el mar;
Podrá romperse el eje de la tierra
como un débil cristal.
¡todo sucederá! Podrá la muerte
cubrirme con su fúnebre crespón;
Pero jamás en mí podrá apagarse
la llama de tu amor."""

# pattern splitting wherever a non-alphanumeric character appears
patron = re.compile(r'\W+')
palabras = patron.split(becquer)
palabras[:10]  # first 10 words

# using the non-compiled version of split, splitting by line
re.split(r'\n', becquer)

# capping the number of splits
patron.split(becquer, 5)

# replacing "Podrá" / "podrá" with "Puede"
podra = re.compile(r'\b(P|p)odrá\b')
puede = podra.sub("Puede", becquer)
print(puede)

# limiting the number of replacements
puede = podra.sub("Puede", becquer, 2)
print(puede)

# the non-compiled subn reports how many replacements were made
re.subn(r'\b(P|p)odrá\b', "Puede", becquer)
# findall nos devuelve una lista con todas las coincidencias
re.findall(r'\bfoo\b', texto)
# Ejemplo de IGNORECASE
# Cambiando "Podrá" o "podra" por "Puede"
podra = re.compile(r'podrá\b', re.I) # el patrón se vuelve más sencillo
puede = podra.sub("puede", becquer)
print(puede)
# Example of VERBOSE mode: the pattern may contain whitespace and comments
mail = re.compile(r"""
\b            # comienzo de delimitador de palabra
[\w.%+-]      # usuario: Cualquier caracter alfanumerico mas los signos (.%+-)
+@            # seguido de @
[\w.-]        # dominio: Cualquier caracter alfanumerico mas los signos (.-)
+\.           # seguido de .
[a-zA-Z]{2,6} # dominio de alto nivel: 2 a 6 letras en minúsculas o mayúsculas.
\b            # fin de delimitador de palabra
""", re.X)

mails = """raul.lopez@relopezbriega.com, Raul Lopez Briega,
foo bar, relopezbriega@relopezbriega.com.ar, raul@github.io,
https://relopezbriega.com.ar, https://relopezbriega.github.io,
python@python, river@riverplate.com.ar, pythonAR@python.pythonAR"""

# filtering the structurally valid email addresses
mail.findall(mails)
# Accessing groups by index
patron = re.compile(r"(\w+) (\w+)")
s = patron.search("Raul Lopez")
# group 1
s.group(1)
# group 2
s.group(2)

# Accessing groups by name, via the (?P<name>pattern) syntax
patron = re.compile(r"(?P<nombre>\w+) (?P<apellido>\w+)")
s = patron.search("Raul Lopez")
# group "nombre"
s.group("nombre")
# group "apellido"
s.group("apellido")
# Validating a URL
url = re.compile(r"^(https?:\/\/)?([\da-z\.-]+)\.([a-z\.]{2,6})([\/\w \.-]*)*\/?$")
# https://relopezbriega.com.ar is accepted as a valid url
url.search("https://relopezbriega.com.ar")
# but https://google.com/un/archivo!.html is rejected because of the ! character
print(url.search("https://google.com/un/archivo!.html"))

# Validating an IP address (raw strings fix the invalid '\.' escape warning)
patron = (r'^(?:(?:25[0-5]|2[0-4][0-9]|'
          r'[01]?[0-9][0-9]?)\.){3}'
          r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$')
ip = re.compile(patron)
# the ip 73.60.124.136 is valid
ip.search("73.60.124.136")
# but the ip 256.60.124.136 is not
print(ip.search("256.60.124.136"))

# Validating a date
fecha = re.compile(r'^(0?[1-9]|[12][0-9]|3[01])/(0?[1-9]|1[012])/((19|20)\d\d)$')
# validates 13/02/1982
fecha.search("13/02/1982")
# does not validate 13-02-1982
print(fecha.search("13-02-1982"))
# does not validate 32/12/2015
print(fecha.search("32/12/2015"))
# does not validate 30/14/2015
print(fecha.search("30/14/2015"))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Buscando coincidencias
Step3: Ahora que ya tenemos el objeto de expresión regular compilado podemos utilizar alguno de los siguientes métodos para buscar coincidencias con nuestro texto.
Step4: Como podemos ver en estos ejemplos, cuando hay coincidencias, Python nos devuelve un Objeto de coincidencia (salvo por el método findall() que devuelve una lista). Este Objeto de coincidencia también tiene sus propios métodos que nos proporcionan información adicional sobre la coincidencia; éstos métodos son
Step6: Modificando el texto de entrada
Step7: Funciones no compiladas
Step10: Banderas de compilación
Step11: Como podemos ver en este último ejemplo, la opción VERBOSE puede ser muy util para que cualquier persona que lea nuestra expresion regular pueda entenderla más fácilmente.
Step12: Podemos utilizar la sintaxis especial (?P<nombre>patron) que nos ofrece Python para nombrar estos grupos y que sea más fácil identificarlos.
Step13: Otros ejemplos de expresiones regulares
Step14: Validando una dirección IP
Step15: Validando una fecha
|
14,249
|
<ASSISTANT_TASK:>
Python Code:
import magma as m
from mantle import DFF
class Register(m.Generator):
    """Generate an n-bit register

    Interface
    ---------
        I : In(Bits[width]), O : Out(Bits[width])
    """
    @staticmethod
    def generate(width: int):
        T = m.Bits[width]

        class _Register(m.Circuit):
            name = f'Register{width}'
            io = m.IO(I=m.In(T), O=m.Out(T)) + m.ClockIO()
            # one DFF per bit, joined so the register accepts a Bits[width] input
            reg = m.join(m.col(lambda y: DFF(name=f"reg{y}"), width))
            m.wire(reg(io.I), io.O)

        return _Register
print(repr(Register.generate(4)))
class Decode(m.Generator):
    """Generate a circuit that outputs 1 when its width-bit input equals `value`."""
    @staticmethod
    def generate(value: int, width: int):
        class _Decode(m.Circuit):
            name = f"Decode{width}_{value}"
            io = m.IO(I=m.In(m.Bits[width]),
                      O=m.Out(m.Bit))
            io.O @= io.I == value

        return _Decode
class Decoder(m.Generator):
    """Generate a width-bit to one-hot (2**width line) decoder from Decode cells."""
    @staticmethod
    def generate(width: int):
        class _Decoder(m.Circuit):
            io = m.IO(I=m.In(m.Bits[width]),
                      O=m.Out(m.Bits[1 << width]))
            # fan the input out to one Decode instance per possible value
            io.O @= m.fork(m.col(lambda y: Decode(y, width), 1 << width))(io.I)

        return _Decoder
print(repr(Decoder.generate(2)))
class SISO(m.Generator):
    """Generate Serial-In, Serial-Out shift register with `n` cycles of delay.

        I : In(Bit), O : Out(Bit)
    """
    @staticmethod
    def generate(n: int):
        class _SISO(m.Circuit):
            name = f'SISO{n}'
            io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit)) + m.ClockIO()
            # fold chains the DFFs so each output feeds the next input
            reg = m.fold(m.col(lambda y: DFF(name=f"reg{y}"), n))
            m.wire(reg(io.I), io.O)

        return _SISO
class SIPO(m.Generator):
    """Generate Serial-In, Parallel-Out shift register.

        I : In(Bit), O : Out(Bits[n])
    """
    @staticmethod
    def generate(n: int):
        T = m.Bits[n]

        class _SIPO(m.Circuit):
            name = f'SIPO{n}'
            io = m.IO(I=m.In(m.Bit), O=m.Out(T)) + m.ClockIO()
            # scan chains the DFFs while exposing every stage's output
            reg = m.scan(m.col(lambda y: DFF(name=f"reg{y}"), n))
            m.wire(reg(io.I), io.O)

        return _SIPO
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Register - col and join
Step3: fork
Step5: There is a lot going on in this function.
Step7: scan
|
14,250
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False)
# DO NOT MODIFY THIS CELL
def fully_connected(prev_layer, num_units):
    """Create a fully connected layer with the given layer as input and the given number of neurons.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param num_units: int
        The size of the layer. That is, the number of units, nodes, or neurons.
    :returns Tensor
        A new fully connected layer
    """
    layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)
    return layer
# DO NOT MODIFY THIS CELL
def conv_layer(prev_layer, layer_depth):
    """Create a convolutional layer with the given layer as input.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param layer_depth: int
        We'll set the strides and number of feature maps based on the layer's depth in the network.
        This is *not* a good way to make a CNN, but it helps us create this example with very little code.
    :returns Tensor
        A new convolutional layer
    """
    # every third layer downsamples with stride 2
    strides = 2 if layer_depth % 3 == 0 else 1
    conv_layer = tf.layers.conv2d(prev_layer, layer_depth * 4, 3, strides, 'same', activation=tf.nn.relu)
    return conv_layer
# DO NOT MODIFY THIS CELL
def train(num_batches, batch_size, learning_rate):
    """Build the 20-conv-layer MNIST network (no batch norm), train it and report accuracy."""
    # Build placeholders for the input samples and labels
    inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
    labels = tf.placeholder(tf.float32, [None, 10])

    # Feed the inputs into a series of 20 convolutional layers
    layer = inputs
    for layer_i in range(1, 20):
        layer = conv_layer(layer, layer_i)

    # Flatten the output from the convolutional layers
    orig_shape = layer.get_shape().as_list()
    layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])

    # Add one fully connected layer
    layer = fully_connected(layer, 100)

    # Create the output layer with 1 node for each class
    logits = tf.layers.dense(layer, 10)

    # Define loss and training operations
    model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
    train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)

    # Create operations to test accuracy
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Train and test the network
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for batch_i in range(num_batches):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)

            # train this batch
            sess.run(train_opt, {inputs: batch_xs, labels: batch_ys})

            # Periodically check the validation or training loss and accuracy
            if batch_i % 100 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
                                                              labels: mnist.validation.labels})
                print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
            elif batch_i % 25 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys})
                print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))

        # At the end, score the final accuracy for both the validation and test sets
        acc = sess.run(accuracy, {inputs: mnist.validation.images,
                                  labels: mnist.validation.labels})
        print('Final validation accuracy: {:>3.5f}'.format(acc))
        acc = sess.run(accuracy, {inputs: mnist.test.images,
                                  labels: mnist.test.labels})
        print('Final test accuracy: {:>3.5f}'.format(acc))

        # Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
        correct = 0
        for i in range(100):
            correct += sess.run(accuracy, feed_dict={inputs: [mnist.test.images[i]],
                                                     labels: [mnist.test.labels[i]]})
        print("Accuracy on 100 samples:", correct / 100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
def fully_connected(prev_layer, num_units, is_training):
    """Create a fully connected layer with batch normalization.

    BUG FIX: the body used `is_training` (and train() passes it as the third
    argument) but the signature omitted it — added the parameter.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param num_units: int
        The size of the layer. That is, the number of units, nodes, or neurons.
    :param is_training: bool or Tensor
        Whether the network is training; selects batch vs. population statistics
        in batch normalization.
    :returns Tensor
        A new fully connected layer
    """
    # bias is redundant before batch norm, and the activation comes after it
    layer = tf.layers.dense(prev_layer, num_units, use_bias=False, activation=None)
    layer = tf.layers.batch_normalization(layer, training=is_training)
    layer = tf.nn.relu(layer)
    return layer
def conv_layer(prev_layer, layer_depth, is_training):
    """Create a convolutional layer with batch normalization.

    BUG FIX: the body used `is_training` (and train() passes it as the third
    argument) but the signature omitted it — added the parameter.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param layer_depth: int
        We'll set the strides and number of feature maps based on the layer's depth in the network.
        This is *not* a good way to make a CNN, but it helps us create this example with very little code.
    :param is_training: bool or Tensor
        Whether the network is training; selects batch vs. population statistics
        in batch normalization.
    :returns Tensor
        A new convolutional layer
    """
    strides = 2 if layer_depth % 3 == 0 else 1
    conv_layer = tf.layers.conv2d(prev_layer, layer_depth * 4, 3, strides, 'same',
                                  use_bias=False, activation=None)
    conv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)
    conv_layer = tf.nn.relu(conv_layer)
    return conv_layer
def train(num_batches, batch_size, learning_rate):
    """Build, train and evaluate the batch-normalized MNIST network."""
    # Build placeholders for the input samples and labels
    inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
    labels = tf.placeholder(tf.float32, [None, 10])

    # Add placeholder to indicate whether or not we're training the model
    is_training = tf.placeholder(tf.bool)

    # Feed the inputs into a series of 20 convolutional layers
    layer = inputs
    for layer_i in range(1, 20):
        layer = conv_layer(layer, layer_i, is_training)

    # Flatten the output from the convolutional layers
    orig_shape = layer.get_shape().as_list()
    layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])

    # Add one fully connected layer
    layer = fully_connected(layer, 100, is_training)

    # Create the output layer with 1 node for each class
    logits = tf.layers.dense(layer, 10)

    # Define loss and training operations
    model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))

    # Tell TensorFlow to update the population statistics while training
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)

    # Create operations to test accuracy
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Train and test the network
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for batch_i in range(num_batches):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)

            # train this batch (is_training=True -> use batch statistics)
            sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training: True})

            # Periodically check the validation or training loss and accuracy
            if batch_i % 100 == 0:
                # BUG FIX: a comma was missing after the `labels` feed entry
                loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
                                                              labels: mnist.validation.labels,
                                                              is_training: False})
                print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
            elif batch_i % 25 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training: False})
                print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))

        # At the end, score the final accuracy for both the validation and test sets
        acc = sess.run(accuracy, {inputs: mnist.validation.images,
                                  labels: mnist.validation.labels,
                                  is_training: False})
        print('Final validation accuracy: {:>3.5f}'.format(acc))
        acc = sess.run(accuracy, {inputs: mnist.test.images,
                                  labels: mnist.test.labels,
                                  is_training: False})
        print('Final test accuracy: {:>3.5f}'.format(acc))

        # Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
        correct = 0
        for i in range(100):
            correct += sess.run(accuracy, feed_dict={inputs: [mnist.test.images[i]],
                                                     labels: [mnist.test.labels[i]],
                                                     is_training: False})
        print("Accuracy on 100 samples:", correct / 100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
def fully_connected(prev_layer, num_units):
Create a fully connectd layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)
return layer
def conv_layer(prev_layer, layer_depth):
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
strides = 2 if layer_depth % 3 == 0 else 1
in_channels = prev_layer.get_shape().as_list()[3]
out_channels = layer_depth*4
weights = tf.Variable(
tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.05))
bias = tf.Variable(tf.zeros(out_channels))
conv_layer = tf.nn.conv2d(prev_layer, weights, strides=[1,strides, strides, 1], padding='SAME')
conv_layer = tf.nn.bias_add(conv_layer, bias)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100)
# Create the output layer with 1 node for each
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]]})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Batch Normalization using tf.layers.batch_normalization<a id="example_1"></a>
Step6: We'll use the following function to create convolutional layers in our network. They are very basic
Step8: Run the following cell, along with the earlier cells (to load the dataset and define the necessary functions).
Step10: With this many layers, it's going to take a lot of iterations for this network to learn. By the time you're done training these 800 batches, your final test and validation accuracies probably won't be much better than 10%. (It will be different each time, but will most likely be less than 15%.)
Step12: TODO
Step13: TODO
Step15: With batch normalization, you should now get an accuracy over 90%. Notice also the last line of the output
Step17: TODO
Step18: TODO
|
14,251
|
<ASSISTANT_TASK:>
Python Code:
# TODO: You Must Change the setting bellow
MYSQL = {
'user': 'root',
'passwd': '',
'db': 'coupon_purchase',
'host': '127.0.0.1',
'port': 3306,
'local_infile': True,
'charset': 'utf8',
}
DATA_DIR = '/home/nasuno/recruit_kaggle_datasets' # ディレクトリの名前に日本語(マルチバイト文字)は使わないでください。
OUTPUTS_DIR = '/home/nasuno/recruit_kaggle/outputs' # 予測結果などを保存するディレクトリ。
%matplotlib inline
import matplotlib.pyplot as plt
import MySQLdb
import numpy
from sklearn.utils import shuffle
from sklearn.cross_validation import train_test_split
from sklearn.metrics import f1_score, accuracy_score
from sklearn.linear_model import LogisticRegression
from datetime import datetime, timedelta
from itertools import product
# Random Seed
rng = numpy.random.RandomState(1234)
dbcon = MySQLdb.connect(**MYSQL)
dbcur = dbcon.cursor()
dbcur.execute('''DROP TABLE IF EXISTS user_list;''') # チュートリアルの便宜上、一度削除します。
query = '''
CREATE TABLE IF NOT EXISTS user_list (
reg_date DATETIME,
sex_id VARCHAR(1),
age INT,
withdraw_date DATETIME,
pref_name VARCHAR(15),
user_id_hash VARCHAR(32),
PRIMARY KEY(user_id_hash),
INDEX(reg_date),
INDEX(sex_id),
INDEX(age),
INDEX(withdraw_date),
INDEX(pref_name)
) ENGINE=MyISAM DEFAULT CHARSET=utf8;
'''
dbcur.execute(query)
csv_path = DATA_DIR + '/user_list.csv'
query = '''
LOAD DATA LOCAL INFILE "''' + csv_path + '''"
INTO TABLE user_list
CHARACTER SET utf8
FIELDS TERMINATED BY ','
IGNORE 1 LINES
(reg_date, sex_id, age,@withdraw_date, pref_name, user_id_hash)
SET
withdraw_date = IF(CHAR_LENGTH(@withdraw_date) != 19 , '9999-12-31 23:59:59', STR_TO_DATE(@withdraw_date, "%Y-%m-%d %H:%i:%s"))
;
'''
dbcur.execute(query)
### prefecture_locations
csv_path = DATA_DIR + '/prefecture_locations.csv'
dbcur.execute('''DROP TABLE IF EXISTS prefecture_locations;''')
dbcur.execute('''
CREATE TABLE IF NOT EXISTS prefecture_locations (
pref_name VARCHAR(15),
PRIMARY KEY(pref_name),
prefectual_office VARCHAR(15),
latitude DOUBLE,
longitude DOUBLE
) ENGINE=MyISAM DEFAULT CHARSET=utf8
;
''')
dbcur.execute('''
LOAD DATA LOCAL INFILE "''' + csv_path + '''"
INTO TABLE prefecture_locations
CHARACTER SET utf8
FIELDS TERMINATED BY ','
IGNORE 1 LINES
(pref_name, prefectual_office, latitude, longitude)
;
''')
### coupon_area_train
csv_path = DATA_DIR + '/coupon_area_train.csv'
dbcur.execute('''DROP TABLE IF EXISTS coupon_area_train;''')
dbcur.execute('''
CREATE TABLE IF NOT EXISTS coupon_area_train (
small_area_name VARCHAR(32),
pref_name VARCHAR(15),
coupon_id_hash VARCHAR(32),
INDEX(coupon_id_hash),
INDEX(pref_name)
) ENGINE=MyISAM DEFAULT CHARSET=utf8
;
''')
dbcur.execute('''
LOAD DATA LOCAL INFILE "''' + csv_path + '''"
INTO TABLE coupon_area_train
CHARACTER SET utf8
FIELDS TERMINATED BY ','
IGNORE 1 LINES
(small_area_name,pref_name,coupon_id_hash)
;
''')
### coupon_area_test
csv_path = DATA_DIR + '/coupon_area_test.csv'
dbcur.execute('''DROP TABLE IF EXISTS coupon_area_test;''')
dbcur.execute('''
CREATE TABLE IF NOT EXISTS coupon_area_test (
small_area_name VARCHAR(32),
pref_name VARCHAR(15),
coupon_id_hash VARCHAR(32),
INDEX(coupon_id_hash),
INDEX(pref_name)
) ENGINE=MyISAM DEFAULT CHARSET=utf8
;
''')
dbcur.execute('''
LOAD DATA LOCAL INFILE "''' + csv_path + '''"
INTO TABLE coupon_area_test
CHARACTER SET utf8
FIELDS TERMINATED BY ','
IGNORE 1 LINES
(small_area_name,pref_name,coupon_id_hash)
;
''')
### coupon_detail_train
csv_path = DATA_DIR + '/coupon_detail_train.csv'
dbcur.execute('''DROP TABLE IF EXISTS coupon_detail_train;''')
dbcur.execute('''
CREATE TABLE IF NOT EXISTS coupon_detail_train (
item_count INT,
i_date DATETIME,
small_area_name VARCHAR(32),
purchaseid_hash VARCHAR(32),
user_id_hash VARCHAR(32),
coupon_id_hash VARCHAR(32),
INDEX(coupon_id_hash)
) ENGINE=MyISAM DEFAULT CHARSET=utf8
;
''')
dbcur.execute('''
LOAD DATA LOCAL INFILE "''' + csv_path + '''"
INTO TABLE coupon_detail_train
CHARACTER SET utf8
FIELDS TERMINATED BY ','
IGNORE 1 LINES
(item_count, i_date, small_area_name, purchaseid_hash, user_id_hash, coupon_id_hash)
;
''')
### coupon_visit_train
csv_path = DATA_DIR + '/coupon_visit_train.csv'
dbcur.execute('''DROP TABLE IF EXISTS coupon_visit_train;''')
dbcur.execute('''
CREATE TABLE IF NOT EXISTS coupon_visit_train (
purchase_flg INT,
i_date DATETIME,
page_serial INT,
referrer_hash VARCHAR(128),
view_coupon_id_hash VARCHAR(128),
user_id_hash VARCHAR(32),
session_id_hash VARCHAR(128),
purchaseid_hash VARCHAR(32),
INDEX(user_id_hash, i_date),
INDEX(i_date, user_id_hash),
INDEX(view_coupon_id_hash),
INDEX(purchaseid_hash),
INDEX(purchase_flg)
) ENGINE=MyISAM DEFAULT CHARSET=utf8
;
''')
dbcur.execute('''
LOAD DATA LOCAL INFILE "''' + csv_path + '''"
INTO TABLE coupon_visit_train
CHARACTER SET utf8
FIELDS TERMINATED BY ','
IGNORE 1 LINES
(purchase_flg,i_date,page_serial,referrer_hash,view_coupon_id_hash,user_id_hash,session_id_hash,purchaseid_hash)
;
''')
### coupon_list_train
csv_path = DATA_DIR + '/coupon_list_train.csv'
dbcur.execute('''DROP TABLE IF EXISTS coupon_list_train;''')
dbcur.execute('''
CREATE TABLE IF NOT EXISTS coupon_list_train (
capsule_text VARCHAR(20),
genre_name VARCHAR(50),
price_rate INT,
catalog_price INT,
discount_price INT,
dispfrom DATETIME,
dispend DATETIME,
dispperiod INT,
validfrom DATE,
validend DATE,
validperiod INT,
usable_date_mon VARCHAR(7),
usable_date_tue VARCHAR(7),
usable_date_wed VARCHAR(7),
usable_date_thu VARCHAR(7),
usable_date_fri VARCHAR(7),
usable_date_sat VARCHAR(7),
usable_date_sun VARCHAR(7),
usable_date_holiday VARCHAR(7),
usable_date_before_holiday VARCHAR(7),
large_area_name VARCHAR(30),
ken_name VARCHAR(8),
small_area_name VARCHAR(30),
coupon_id_hash VARCHAR(32),
PRIMARY KEY(coupon_id_hash),
INDEX(ken_name),
INDEX(genre_name)
) ENGINE=MyISAM DEFAULT CHARSET=utf8
;
''')
dbcur.execute('''
LOAD DATA LOCAL INFILE "''' + csv_path + '''"
INTO TABLE coupon_list_train
CHARACTER SET utf8
FIELDS TERMINATED BY ','
IGNORE 1 LINES
(capsule_text,genre_name,price_rate,catalog_price,discount_price,dispfrom,dispend,dispperiod,validfrom,validend,@validperiod,usable_date_mon,usable_date_tue,usable_date_wed,usable_date_thu,usable_date_fri,usable_date_sat,usable_date_sun,usable_date_holiday,usable_date_before_holiday,large_area_name,ken_name,small_area_name,coupon_id_hash)
SET validperiod=IF(@validperiod = 'NA', Null, @validperiod)
;
''')
### coupon_list_test
csv_path = DATA_DIR + '/coupon_list_test.csv'
dbcur.execute('''DROP TABLE IF EXISTS coupon_list_test;''')
dbcur.execute('''
CREATE TABLE IF NOT EXISTS coupon_list_test (
capsule_text VARCHAR(20),
genre_name VARCHAR(50),
price_rate INT,
catalog_price INT,
discount_price INT,
dispfrom DATETIME,
dispend DATETIME,
dispperiod INT,
validfrom DATE,
validend DATE,
validperiod INT,
usable_date_mon VARCHAR(7),
usable_date_tue VARCHAR(7),
usable_date_wed VARCHAR(7),
usable_date_thu VARCHAR(7),
usable_date_fri VARCHAR(7),
usable_date_sat VARCHAR(7),
usable_date_sun VARCHAR(7),
usable_date_holiday VARCHAR(7),
usable_date_before_holiday VARCHAR(7),
large_area_name VARCHAR(30),
ken_name VARCHAR(8),
small_area_name VARCHAR(30),
coupon_id_hash VARCHAR(32),
PRIMARY KEY(coupon_id_hash),
INDEX(ken_name),
INDEX(genre_name)
) ENGINE=MyISAM DEFAULT CHARSET=utf8
;
''')
dbcur.execute('''
LOAD DATA LOCAL INFILE "''' + csv_path + '''"
INTO TABLE coupon_list_test
CHARACTER SET utf8
FIELDS TERMINATED BY ','
IGNORE 1 LINES
(capsule_text,genre_name,price_rate,catalog_price,discount_price,dispfrom,dispend,dispperiod,validfrom,validend,@validperiod,usable_date_mon,usable_date_tue,usable_date_wed,usable_date_thu,usable_date_fri,usable_date_sat,usable_date_sun,usable_date_holiday,usable_date_before_holiday,large_area_name,ken_name,small_area_name,coupon_id_hash)
SET validperiod=IF(@validperiod = 'NA', Null, @validperiod)
;
''')
validation_start = datetime.strptime('2012-06-17 00:00:00', '%Y-%m-%d %H:%M:%S')
validation_end = validation_start + timedelta(days=7)
dbcur.execute(''' DROP TABLE IF EXISTS coupon_visit_train_training;''') # チュートリアルの便宜上一回削除します。
dbcur.execute(''' CREATE TABLE IF NOT EXISTS coupon_visit_train_training LIKE coupon_visit_train;''')
dbcur.execute('''
INSERT INTO coupon_visit_train_training
SELECT *
FROM coupon_visit_train
WHERE i_date >= "2011-07-01 00:00:00" AND i_date < %s
;
''', (validation_start, ))
dbcur.execute(''' DROP TABLE IF EXISTS coupon_visit_train_validation;''') # チュートリアルの便宜上一回削除します。
dbcur.execute(''' CREATE TABLE IF NOT EXISTS coupon_visit_train_validation LIKE coupon_visit_train;''')
dbcur.execute('''
INSERT INTO coupon_visit_train_validation
SELECT *
FROM coupon_visit_train
WHERE i_date >= %s
;
''', (validation_start, ))
# validation 期間に購買されうるクーポンの抽出
dbcur.execute('''
SELECT
coupon_id_hash
FROM coupon_list_train
WHERE
NOT (dispend <= %s OR dispfrom > %s)
;
''', (validation_start, validation_end))
coupon_ids = []
for row in dbcur.fetchall():
coupon_ids.append(row[0])
# user_idsをselectして、ランダムに、購買アイテムを割り当てる。
dbcur.execute('''
SELECT
user_id_hash
FROM user_list
;
''')
user_pcoupon_pred = {}
for row in dbcur.fetchall():
user_pcoupon_pred[row[0]] =list(shuffle(coupon_ids, random_state=rng)[:10])
# validation期間に購買したクーポンリストを抽出。
dbcur.execute('''
SELECT
user_id_hash, view_coupon_id_hash
FROM coupon_visit_train_validation
WHERE purchase_flg = 1
;
''')
user_pcoupon_true = {}
for row in dbcur.fetchall():
if row[0] not in user_pcoupon_true:
user_pcoupon_true[row[0]] = []
user_pcoupon_true[row[0]].append(row[1])
# ap10を算出する関数を定義。
def get_ap10(y_pred, y_true):
ap10 = 0.
y_true = set(y_true)
for i in range(len(y_pred)):
if y_pred[i] in y_true:
c = set(y_pred[:i + 1])
ap10 += len(y_true & c) / float(i + 1)
ap10 /= min(len(y_true), 10)
return ap10
map10 = 0.
n_purchased_user = 0.
for user_id in user_pcoupon_pred:
if user_id not in user_pcoupon_true:
# 当該ユーザがvalidation期間にcouponを買わなかった場合、
# ap@10は0
continue
n_purchased_user += 1
y_true = user_pcoupon_true[user_id]
y_pred = user_pcoupon_pred[user_id]
map10 += get_ap10(y_pred, y_true)
max_map10 = n_purchased_user / len(user_pcoupon_pred)
map10 /= len(user_pcoupon_pred)
print 'max_map@10: %.5f, map@10: %.5f' % (max_map10, map10)
output = ['USER_ID_hash,PURCHASED_COUPONS']
for user_id in user_pcoupon_pred:
output.append(user_id + ',' + ' '.join(user_pcoupon_pred[user_id]))
output = '\n'.join(output)
with open(OUTPUTS_DIR + '/random_prediction_valid.csv', 'wb') as fid:
fid.write(output)
# ユニークな都道府県リストの取得
dbcur.execute(''' SELECT pref_name FROM prefecture_locations ORDER BY pref_name ; ''')
pref_data = []
for row in dbcur.fetchall():
pref_data.append(row)
# ユーザの素性を作成。(ユーザの素性はtraining、validation, testで共通)
dbcur.execute('''
SELECT
t1.user_id_hash,
IF(t1.sex_id = 'm', 1, 0),
(t1.age-15)/65,
''' + ', '.join([u'IF(t1.pref_name = "' + p[0] + u'", 1, 0)' for i, p in enumerate(pref_data)]) + '''
FROM user_list AS t1
''')
user_feature = {} # ユーザの素性ベクトル
for row in dbcur.fetchall():
user_feature[row[0]] = row[1:]
training_start = validation_start - timedelta(days=7) # 訓練開始日時を算出。
# カテゴリリストの取得
dbcur.execute(''' SELECT DISTINCT(capsule_text) FROM coupon_list_train ORDER BY capsule_text;''')
capsule_data = []
for row in dbcur.fetchall():
capsule_data.append(row)
# ジャンルリストの取得
dbcur.execute(''' SELECT DISTINCT(genre_name) FROM coupon_list_train ORDER BY genre_name;''')
genre_data = []
for row in dbcur.fetchall():
genre_data.append(row)
# 大エリアリストの取得
dbcur.execute(''' SELECT DISTINCT(large_area_name) FROM coupon_list_train ORDER BY large_area_name;''')
larea_data = []
for row in dbcur.fetchall():
larea_data.append(row)
# 都道府県リストの取得
dbcur.execute(''' SELECT DISTINCT(ken_name) FROM coupon_list_train ORDER BY ken_name;''')
pref_data = []
for row in dbcur.fetchall():
pref_data.append(row)
# 小エリアリストの取得
dbcur.execute(''' SELECT DISTINCT(small_area_name) FROM coupon_list_train ORDER BY small_area_name;''')
sarea_data = []
for row in dbcur.fetchall():
sarea_data.append(row)
def get_item_feature(f_date, t_date):
# クーポンの素性を作成する関数。
# @f_date:対象期間の開始日時
# @t_date:対象期間の終了日時
# テーブルが訓練用のテーブルとなっている為、training とvalidationのデータを作成する際にしか利用できない。
dbcur.execute('''
SELECT
coupon_id_hash,
''' + ', '.join([u'IF(capsule_text = "' + p[0] + u'", 1, 0)' for i, p in enumerate(capsule_data)]) + ''',
''' + ', '.join([u'IF(genre_name = "' + p[0] + u'", 1, 0)' for i, p in enumerate(genre_data)]) + ''',
COALESCE(CAST(usable_date_mon AS SIGNED), 0),
COALESCE(CAST(usable_date_tue AS SIGNED), 0),
COALESCE(CAST(usable_date_wed AS SIGNED), 0),
COALESCE(CAST(usable_date_thu AS SIGNED), 0),
COALESCE(CAST(usable_date_fri AS SIGNED), 0),
COALESCE(CAST(usable_date_sat AS SIGNED), 0),
COALESCE(CAST(usable_date_sun AS SIGNED), 0),
COALESCE(CAST(usable_date_holiday AS SIGNED), 0),
COALESCE(CAST(usable_date_before_holiday AS SIGNED), 0),
''' + ', '.join([u'IF(large_area_name = "' + p[0] + u'", 1, 0)' for i, p in enumerate(larea_data)]) + ''',
''' + ', '.join([u'IF(ken_name = "' + p[0] + u'", 1, 0)' for i, p in enumerate(pref_data)]) + ''',
''' + ', '.join([u'IF(small_area_name = "' + p[0] + u'", 1, 0)' for i, p in enumerate(sarea_data)]) + '''
FROM coupon_list_train
WHERE
NOT (dispend <= %s OR dispfrom > %s)
;
''', (f_date, t_date))
item_feature = {} # クーポンの素性
for row in dbcur.fetchall():
item_feature[row[0]] = row[1:]
return item_feature
item_feature_train = get_item_feature(training_start, validation_start) # training 期間のクーポンの素性
item_feature_valid = get_item_feature(validation_start, validation_end) # validation 期間のクーポンの素性
print 'n_item_train: %d, n_item_valid: %d' % (len(item_feature_train), len(item_feature_valid))
def get_purchased_coupons(f_date, t_date):
# 実際に購買されるクーポンの取得
# @f_date:対象期間の開始日時
# @t_date:対象期間の終了日時
dbcur.execute('''
SELECT user_id_hash, view_coupon_id_hash
FROM coupon_visit_train
WHERE i_date >= %s AND i_date < %s AND purchase_flg = 1
ORDER BY user_id_hash, view_coupon_id_hash
;
''', (f_date, t_date))
purchased_items = {} # 各ユーザがどのクーポン群を購入するかを辞書型で返す。
for row in dbcur.fetchall():
if row[0] not in purchased_items:
purchased_items[row[0]] = set([])
purchased_items[row[0]].add(row[1])
return purchased_items
user_pcoupon_train = get_purchased_coupons(training_start, validation_start) # training 期間に各ユーザが実際に買ったクーポン
user_pcoupon_valid = get_purchased_coupons(validation_start, validation_end) # validation 期間に各ユーザが実際に買ったクーポン
n_pairs_train = len(user_feature) * len(item_feature_train) # ユーザ数×trainingクーポン数
n_pairs_valid = len(user_feature) * len(item_feature_valid) # ユーザ数×validation クーポン数
print 'n_train_datasets: %d, n_validation_datasets: %d, n_puser: %d' %(n_pairs_train, n_pairs_valid, len([1 for a in user_pcoupon_train if len(a) > 0]))
# 訓練データに利用するユーザをtraining期間に、実際にクーポンを購入したユーザに限定し、そのユーザIDとクーポンのIDの全組み合せを出力する。
pairs_train = list(product([k for k in user_pcoupon_train if len(user_pcoupon_train[k]) > 0], item_feature_train.keys()))
print 'n_train_datasets: %d' %(len(pairs_train), )
features_train = [] # 学習に用いる素性
labels_train = [] # 学習に用いるラベル
for pair in pairs_train: # 各ユーザ、アイテムペアについて
user_id, item_id = pair
features_train.append(user_feature[user_id] + item_feature_train[item_id]) # 単純な結合
if user_id in user_pcoupon_train and item_id in user_pcoupon_train[user_id]:
# 購買された
labels_train.append(1)
else:
# 購買されなかった
labels_train.append(0)
model = LogisticRegression() # ロジスティック回帰のモデル構築(ハイパーパラメタの調整は省略)。インスタンス化。
model.fit(features_train, labels_train) # x, y~を入力して学習
purchase_index = numpy.argmax(model.classes_) # 1(=購買ラベル)がついている方のカラムインデックスを取得
item_index_to_item_id = sorted(item_feature_valid.keys()) # クーポンの番号をクーポンIDに変換する。
map10 = 0.
for user_id in user_feature: # map@10はユーザごとにap@10を算出する。
if user_id not in user_pcoupon_valid: # 購入したクーポンが亡ければ、ap@10は0なので、スコア評価時には飛ばす。
continue
feature = []
for item_id in item_index_to_item_id:
feature.append(user_feature[user_id] + item_feature_valid[item_id]) # 単純にユーザ素性とクーポン素性を結合
y_proba = model.predict_proba(feature) # 各クーポンの購買確率を算出
y_pred_indices = numpy.argsort(y_proba[:, purchase_index])[-10:][::-1] # 購入確率が高いクーポン上位10個のクーポン番号を取得
y_pred_item_ids = [item_index_to_item_id[i] for i in y_pred_indices] # クーポン番号をクーポンIDに変換。
map10 += get_ap10(y_pred_item_ids, user_pcoupon_valid[user_id]) # ap@10を計算して、map@10に足す。
map10 /= len(user_feature) # map@10はユーザ平均なので、全ユーザで割る。
print 'MAP@10: %.5f' % (map10, )
dbcur.execute('''
SELECT
COUNT(*),
SUM(purchase_flg),
COUNT(DISTINCT(view_coupon_id_hash))
FROM
coupon_visit_train
GROUP BY user_id_hash
;
''')
n_view = []
n_purchase = []
n_view_u = []
for row in dbcur.fetchall():
n_view.append(int(row[0]))
n_purchase.append(int(row[1]))
n_view_u.append(int(row[2]))
n_view = numpy.asarray(n_view)
n_purchase = numpy.asarray(n_purchase)
n_view_u = numpy.asarray(n_view_u)
### user-coldstartがどういった状況か見る為に、最初の20件だけ見る。
span = 20
fig = plt.figure(figsize=(18, 8))
ax = fig.add_subplot(2, 3, 1)
ax.hist(n_view, bins=numpy.arange(0, span), cumulative=True)
ax.set_title('page view count distribution')
ax = fig.add_subplot(2, 3, 2)
ax.hist(n_purchase, bins=numpy.arange(0, span), cumulative=True)
ax.set_title('purchase count distribution')
ax = fig.add_subplot(2, 3, 3)
ax.hist(n_view_u, bins=numpy.arange(0, span), cumulative=True)
ax.set_title('unique page view count distribution')
ax = fig.add_subplot(2, 3, 4)
ax.plot(n_view, n_purchase, 'x')
ax.set_title('X=page view count, Y=purchase count')
ax = fig.add_subplot(2, 3, 5)
ax.plot(n_view_u, n_purchase, 'x')
ax.set_title('X=unique page view count, Y=purchase count')
ax = fig.add_subplot(2, 3, 6)
ax.plot(n_view, n_view_u, 'x')
ax.set_title('X=page view count, Y=unique page view count')
plt.show()
## 3Dにしても良く分からないことが多いので,辞めましょう。
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(n_view, n_view_u, n_purchase, marker='x')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
dbcur.execute('''
SELECT
t1.coupon_id_hash, COUNT(t2.view_coupon_id_hash), COALESCE(SUM(t2.purchase_flg), 0)
FROM coupon_list_test AS t1
LEFT JOIN coupon_visit_train AS t2 ON t1.coupon_id_hash = t2.view_coupon_id_hash
GROUP BY t1.coupon_id_hash
ORDER BY SUM(t2.purchase_flg)
;
''')
view_count = []
purchase_count = []
for row in dbcur.fetchall():
view_count.append(int(row[1]))
purchase_count.append(int(row[2]))
view_count = numpy.asarray(view_count)
purchase_count = numpy.asarray(purchase_count)
plt.figure()
plt.plot(purchase_count, view_count, '.')
plt.show()
dbcur.execute('''
SELECT
AVG(same_pref_purchase_cnt),
AVG(same_pref_view_cnt),
AVG(same_pref_purchase_cnt / same_pref_view_cnt),
AVG(diff_pref_purchase_cnt),
AVG(diff_pref_view_cnt),
AVG(diff_pref_purchase_cnt / diff_pref_view_cnt)
FROM (
SELECT
t1.user_id_hash,
SUM(t1.pref_name = t3.ken_name AND purchase_flg = 1) AS same_pref_purchase_cnt,
SUM(t1.pref_name = t3.ken_name) AS same_pref_view_cnt,
SUM(t1.pref_name != t3.ken_name AND purchase_flg = 1) AS diff_pref_purchase_cnt,
SUM(t1.pref_name != t3.ken_name) AS diff_pref_view_cnt
FROM user_list AS t1
LEFT JOIN coupon_visit_train AS t2 ON t1.user_id_hash = t2.user_id_hash
LEFT JOIN coupon_list_train AS t3 ON t2.view_coupon_id_hash = t3.coupon_id_hash
WHERE t1.pref_name != ""
GROUP BY t1.user_id_hash
) AS t1
;
''')
data = None
for row in dbcur.fetchall():
data = row
print 'same_purchase: %.2f, same_view: %.2f, same_rate: %.2f, diff_purchase: %.2f, diff_view: %.2f, diff_rate: %.2f' % (data)
dbcur.execute('''
SELECT
t1.sex_id,
AVG(t1.discount_rate_view),
AVG(t1.discount_rate_purchase)
FROM (
SELECT
t1.user_id_hash,
t1.sex_id,
AVG(100 - t3.price_rate) AS discount_rate_view,
COALESCE(SUM(IF(t2.purchase_flg, 100 - t3.price_rate, 0)) / SUM(t2.purchase_flg), 0) AS discount_rate_purchase
FROM user_list AS t1
LEFT JOIN coupon_visit_train AS t2 ON t1.user_id_hash = t2.user_id_hash
LEFT JOIN coupon_list_train AS t3 ON t2.view_coupon_id_hash = t3.coupon_id_hash
GROUP BY t1.user_id_hash
) AS t1
GROUP BY t1.sex_id
;
''')
data = []
for row in dbcur.fetchall():
row = list(row)
row[1] = float(row[1])
row[2] = float(row[2])
data.append(tuple(row))
for row in data:
print 'sex_id: %s, discount_rate_view: %.2f, discount_rate_purchase: %.2f' % (row)
dbcur.execute('''
SELECT
SUM(purchase_flg)
FROM coupon_visit_train_validation
WHERE purchase_flg = 1
GROUP BY user_id_hash
;
''')
x = []
for row in dbcur.fetchall():
x.append(int(row[0]))
plt.figure()
plt.hist(x, bins=numpy.arange(1, 15))
plt.show()
dbcur.execute('''
SELECT
AVG(t1.same_purchase),
AVG(t1.same_view),
AVG(t1.same_purchase / t1.same_view) AS same_rate,
AVG(t1.diff_purchase),
AVG(t1.diff_view),
AVG(t1.diff_purchase / t1.diff_view) AS diff_rate
FROM (
SELECT
t1.user_id_hash,
SUM(t1.genre_name = t3.genre_name AND t2.purchase_flg = 1) AS same_purchase,
SUM(t1.genre_name = t3.genre_name) AS same_view,
SUM(t1.genre_name != t3.genre_name AND t2.purchase_flg = 1) AS diff_purchase,
SUM(t1.genre_name != t3.genre_name) AS diff_view
FROM (
SELECT
t1.user_id_hash, t1.view_coupon_id_hash, t3.genre_name
FROM coupon_visit_train_training AS t1
LEFT JOIN coupon_visit_train_training AS t2 ON t1.user_id_hash = t2.user_id_hash AND t1.i_date < t2.i_date
LEFT JOIN coupon_list_train AS t3 ON t1.view_coupon_id_hash = t3.coupon_id_hash
WHERE t1.purchase_flg = 1 AND t2.user_id_hash IS NULL
GROUP BY t1.user_id_hash
) AS t1
LEFT JOIN coupon_visit_train_validation AS t2 ON t1.user_id_hash = t2.user_id_hash
LEFT JOIN coupon_list_train AS t3 ON t2.view_coupon_id_hash = t3.coupon_id_hash
LEFT JOIN (
SELECT user_id_hash
FROM coupon_visit_train_validation
WHERE purchase_flg = 1
GROUP BY user_id_hash
) AS t4 ON t1.user_id_hash = t4.user_id_hash
WHERE t4.user_id_hash IS NOT NULL
GROUP BY t1.user_id_hash
) AS t1
;
''')
data = None
for row in dbcur.fetchall():
data = row
print 'same_purchase: %.2f, same_view: %.2f, same_rate: %.2f, diff_purchase: %.2f, diff_view: %.2f, diff_rate: %.2f' % (data)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: モジュールのimportや変数の初期化
Step2: 2. データベースへのデータの格納
Step3: 次に、データのインサートです。
Step4: テーブルの作成に利用したCREATE TABLE文には、
Step5: 実行すると、それぞれのレコードでWarningが発生しますが、
Step6: coupon_detail_train.csv
Step7: coupon_visit_train.csv
Step8: coupon_list_train.csv, coupon_list_test.csv
Step9: 3. モデリング対象の設定
Step10: ランダム推定・MAP@10の評価
Step11: 2. 抽出したクーポン群から各ユーザが購買するクーポンをランダムに10個選び、予測結果とする。
Step12: 3. 実際に購買したクーポンと照らし合わせ、MAP@10を算出する。
Step13: ランダムだと、全然当たらないですね。
Step14: Excercise
Step15: クーポンの特徴ベクトル
Step16: ユーザ・クーポンの特徴ベクトルと正解ラベルの割当
Step17: 全部のペアを考慮すると1000万行程度となってしまいメモリに乗り切らなさそうです。
Step18: 予測モデルの構築・精度評価
Step19: 先ほどの、ランダム予測よりだいぶ上がったようです。
Step20: 最終的な精度評価に用いるテストデータに含まれる各クーポンに対して、どれくらいviewやpurchaseのデータが存在するか、の確認。
Step21: 関係性についての仮説をたてる
Step22: まず、同じ地域からの購買よりも,異なる地域からの購買の方が多いことが分かります。
Step23: あまり、変わらないですね、、、
|
14,252
|
<ASSISTANT_TASK:>
Python Code:
aapl = data.DataReader('AAPL', 'yahoo', '2000-01-01')
print(aapl.head())
plt.plot(aapl.Close)
ibm = data.DataReader('AAPl', 'yahoo', '2000-1-1')
print(ibm['Adj Close'].head())
%matplotlib inline
ibm['Adj Close'].plot(figsize=(10,6))
plt.ylabel('price')
plt.xlabel('year')
plt.title('Price history of IBM stock')
ibm = data.DataReader('IBM', 'yahoo', '2000-1-1')
print(ibm['Adj Close'].head())
%matplotlib inline
ibm['Adj Close'].plot(figsize=(10,6))
plt.ylabel('price')
plt.xlabel('year')
plt.title('Price history of IBM stock')
Log_Data = plt.figure()
%matplotlib inline
plt.plot(np.log(aapl['Adj Close']))
plt.ylabel('logarithmic price')
plt.xlabel('year')
plt.title('Logarithmic price history of Apple stock')
S0 = 1
sigma = 0.2/np.sqrt(252)
mu = 0.08/252
%matplotlib inline
for i in range(0, 5):
r = np.random.randn((1000))
plt.plot(S0 * np.cumprod(np.exp(sigma *r +mu)))
S0 = 1.5 # start price
K = 1.0 # strike price
mu = 0 # average growth
sigma = 0.2/np.sqrt(252) # volatility
N = 10000 # runs
M = 252*4 # length of each run (252 business days per year times 4 years)
def call_price(S, K):
return max(0.0, S-K)
def MC_call_price(S0, K, mu, sigma, N, M):
CSum = 0
SSum = 0
for n in range(N):
r = np.random.randn((M))
S = S0 * np.cumprod(np.exp(sigma *r))
SSum += S
CSum += call_price(S[M-1], K)
return CSum/N
S0 = np.linspace(0.0, 2.0,21)
C = []
for k in range(21):
C.append(MC_call_price(k*2/20, K, mu, sigma, N, M))
C
plt.plot(S0, C)
plt.ylabel('Call price')
plt.xlabel('Start price')
plt.title('Call price')
plt.show()
from IPython.display import Image
Image("Picture_Then_Miracle_Occurs.PNG")
d_1 = lambda σ, T, t, S, K: 1. / σ / np.sqrt(T - t) * (np.log(S / K) + 0.5 * (σ ** 2) * (T-t))
d_2 = lambda σ, T, t, S, K: 1. / σ / np.sqrt(T - t) * (np.log(S / K) - 0.5 * (σ ** 2) * (T-t))
call = lambda σ, T, t, S, K: S * sp.stats.norm.cdf( d_1(σ, T, t, S, K) ) - K * sp.stats.norm.cdf( d_2(σ, T, t, S, K) )
Delta = lambda σ, T, t, S, K: sp.stats.norm.cdf( d_1(σ, T, t, S, K) )
plt.plot(np.linspace(sigma, 4., 100), call(1., 1., .9, np.linspace(0.1, 4., 100), 1.))
plt.plot(d_1(1., 1., 0., np.linspace(0.1, 2.9, 10), 1))
#plt.plot(np.linspace(sigma, 4., 100), Delta(1., 1., .9, np.linspace(0.1, 4., 100), 1.))
plt.plot(np.linspace(sigma, 1.9, 100), Delta(1., 1., 0.2, np.linspace(0.01, 1.9, 100), 1.))
plt.plot(np.linspace(sigma, 1.9, 100), Delta(1., 1., 0.6, np.linspace(0.01, 1.9, 100), 1.))
plt.plot(np.linspace(sigma, 1.9, 100), Delta(1., 1., 0.9, np.linspace(0.01, 1.9, 100), 1.))
plt.plot(np.linspace(sigma, 1.9, 100), Delta(1., 1., 0.99, np.linspace(0.01, 1.9, 100), 1.))
plt.plot(np.linspace(sigma, 1.9, 100), Delta(1., 1., 0.9999, np.linspace(0.01, 1.9, 100), 1.))
plt.xlabel("Price/strike price")
plt.ylabel("$\Delta$")
plt.legend(['t = 0.2','t = 0.6', 't = 0.9', 't = 0.99', 't = 0.9999'], loc = 2)
N = 10 #runs
def Simulate_Price_Series(S0, sigma, N, M):
for n in (1,N):
r = np.random.randn((M))
S = S0 * np.cumprod(np.exp(sigma *r))
for m in (1,M):
P.append = Delta(sigma, M, m, S, K)*
return S
plt.plot(1+np.cumsum(np.diff(S) * Delta(sigma, 4, 0, S, K)[1, M-1]))
plt.plot(S)
S
len(Delta(sigma, 4, 0, S, K)[[1:999]])
def Calculate_Portfolio(S0, K, mu, sigma, N, M):
S = Simulate_Price_Series(S0, sigma, N, M)
StockDelta = Delta(sigma, 4, 0, S, K) )
vol = vol0 * np.cumprod(np.exp(sigma*r2)
S = S0 * np.cumprod(np.exp(vol * r))
SSum += S
CSum += call_price(S[M-1], K)
np.histogram(np.diff(aapl['Adj Close']))
plt.hist(np.diff(aapl['Adj Close']), bins='auto') # plt.hist passes it's arguments to np.histogram
plt.title("Histogram of daily returns for Apple")
plt.show()
def MC_call_price_Loc_Vol(S0, K, mu, sigma, N, M):
CSum = 0
SSum = 0
for n in range(N):
r = np.random.randn((M))
r2 = np.random.randn((M))
vol = vol0 * np.cumprod(np.exp(sigma*r2)
S = S0 * np.cumprod(np.exp(vol * r))
SSum += S
CSum += call_price(S[M-1], K)
return CSum/N
S0 = np.linspace(0.0, 2.0,21)
CLoc = []
for k in range(21):
CLoc.append(MC_call_price_Loc_Vol(k*2/20, K, mu, 0.1*sigma, N, M))
CLoc
plt.plot(S0, C)
plt.plot(S0, CLoc)
plt.ylabel('Call price')
plt.xlabel('Start price')
plt.title('Call price')
plt.show()
def iterate_series(n=1000, S0 = 1):
while True:
r = np.random.randn((n))
S = np.cumsum(r) + S0
yield S, r
for (s, r) in iterate_series():
t, t_0 = 0, 0
for t in np.linspace(0, len(s)-1, 100):
r = s[int(t)] / s[int(t_0)]
t_0 = t
break
state = (stock_val, besitz)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: $\Rightarrow$ various different price series
Step2: $\Longrightarrow$ There was a stock split 7
Step3: Define new financial instruments
Step4: Now the roughness of the chart looks more even $\Rightarrow$ We should model increments proportional to the stock price!
Step5: Optionprices
Step6: This curve can also be calculated theoretically. Using stochastic calculus, one can deduce the famous Black-Scholes equation, to calculate this curve. We will not go into detail ...
Step7: ... but will just state the final result!
Step8: For small prices we do not need to own shares, to hedge the option. For high prices we need exactly one share. The interesting area is around the strike price.
Step9: Challenges
Step10: This is not a normal distribution!
Step11: Proposed solution
|
14,253
|
<ASSISTANT_TASK:>
Python Code:
import os
from urllib.request import urlretrieve
import numpy as np
import matplotlib.pyplot as plt
from SeisCL import SeisCL
url = "http://sw3d.cz/software/marmousi/little.bin/velocity.h@"
if not os.path.isfile("velocity.h@"):
urlretrieve(url, filename="velocity.h@")
vel = np.fromfile("velocity.h@", dtype=np.float32)
vp = np.transpose(np.reshape(np.array(vel), [2301, 751]))
seis = SeisCL()
vp = vp[::4, ::4]
vp = np.pad(vp, ((seis.nab, seis.nab), (seis.nab, seis.nab)), mode="edge")
rho = vp * 0 + 2000
vs = vp * 0
model = {'vp':vp, 'vs':vs, 'rho':rho}
seis.N = vp.shape
seis.ND = 2
seis.dh = 16
seis.dt = dt = 6 * seis.dh / (7 * np.sqrt(2) * np.max(vp)) * 0.85
seis.NT = int(3 / seis.dt)
seis.surface_acquisition_2d()
print(seis.N, vp.shape)
_, ax = plt.subplots(1, 1, figsize = (18, 6))
seis.DrawDomain2D(vp, ax = ax, showabs=True, showsrcrec=True)
seis.set_forward([50], model, withgrad=False)
seis.execute()
data = seis.read_data()
p = data[0]
xmin = np.min(seis.rec_pos_all[0, :])
xmax = np.max(seis.rec_pos_all[0, :])
clip=0.01;
vmin=np.min(p)*clip;
vmax=np.max(p)*clip;
fig, ax = plt.subplots()
im = ax.imshow(p,
interpolation='bilinear',
vmin=vmin,
vmax=vmax,
cmap=plt.get_cmap('Greys'),
aspect='auto',
origin='upper',
extent=[xmin,xmax, p.shape[0]*seis.dt*20,0]
)
fig.suptitle('Pressure', fontsize=20)
plt.xlabel('x (km)', fontsize=16)
plt.ylabel('Time (ms)', fontsize=14)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For inversion, we often want a coaser grid. We must also pad the model for the absorbing boundary and create the vs and rho paramters.
Step2: We create an instance of SeisCL and setup the geometry.
Step3: The model is now ready, with sources just outside the CMPL region.
Step4: Let's compute the seismograph for source 50.
Step5: The figure showing the recorded data is finally created.
|
14,254
|
<ASSISTANT_TASK:>
Python Code:
# import requirements
import pandas as pd
import nltk
#import gensim
import spacy
# New York Times data
## read subset of data from csv file into panadas dataframe
df = pd.read_csv('1_100.csv')
## for now, chosing one article to illustrate preprocessing
article = df['full_text'][939]
# Stack Overflow data
## ## read subset of data from csv file into panadas dataframe
df2 = pd.read_csv('doc_200.csv')
## for now, chosing one article to illustrate preprocessing
posting = df2['Document'][1]
# NY Times
article[:500]
# Stack Overflow
posting[:500]
print(article[:500].decode('utf-8').lower())
print(posting[:500].decode('utf-8').lower())
from nltk.tokenize import WhitespaceTokenizer
ws_tokenizer = WhitespaceTokenizer()
# tokenize example document
nyt_ws_tokens = ws_tokenizer.tokenize(article.decode('utf-8').lower())
print nyt_ws_tokens[:75]
from nltk.tokenize import RegexpTokenizer
re_tokenizer = RegexpTokenizer(r'\w+')
nyt_re_tokens = re_tokenizer.tokenize(article.decode('utf-8').lower())
print nyt_re_tokens[:100]
from nltk.corpus import stopwords
# print the first 5 standard English stop words
stop_list = [w for w in stopwords.words('english')]
print stop_list[:5]
# print the type of the elements in the stop words list
print type(stop_list[0])
cleaned_tokens = []
stop_words = set(stopwords.words('english'))
for token in nyt_re_tokens:
if token not in stop_words:
cleaned_tokens.append(token)
print 'Number of tokens before removing stop words: %d' % len(nyt_re_tokens)
print 'Number of tokens after removing stop words: %d' % len(cleaned_tokens)
print cleaned_tokens[:50]
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.lancaster import LancasterStemmer
porter = PorterStemmer()
snowball = SnowballStemmer('english')
lancaster = LancasterStemmer()
print 'Porter Stem of "explanation": %s' % porter.stem('explanation')
print 'Porter2 (Snowball) Stem of "explanation": %s' %snowball.stem('explanation')
print 'Lancaster Stem of "explanation": %s' %lancaster.stem('explanation')
from nltk.stem.wordnet import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
print lemmatizer.lemmatize('explanation')
stemmed_tokens = []
lemmatized_tokens = []
for token in cleaned_tokens:
stemmed_tokens.append(stemmer.stem(token))
lemmatized_tokens.append(lemmatizer.lemmatize(token))
print stemmed_tokens[:50]
print lemmatized_tokens[:50]
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
stemmed_article = ' '.join(wd for wd in stemmed_tokens)
article_vect = vectorizer.fit_transform([stemmed_article])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2>Data</h2>
Step2: Let's take a peek at the raw text of this article to see what we are dealing with!
Step3: <h2>Preprocessing Text</h2>
Step4: <h3>1. Tokenization</h3>
Step5: Example
Step6: <h3>2. Stop Words</h3>
Step7: Let's remove the stop words and compare to our original list of tokens from our regular expression tokenizer.
Step8: You can see that by removing stop words, we now have less than half the number of tokens as our original list. Taking a peek at the cleaned tokens, we can see that a lot of the information that makes the sentence read like something a human would expect has been lost but the key nouns, verbs, adjectives, and adverbs remain.
Step9: You may notice from looking at this sample, however, that a potentially meaningful word has been removed
Step10: While <b><em>stemming</em></b> is a heuristic process that selectively removes the end of words, <b><em>lemmatization</em></b> is a more sophisticated process that takes into account variables such as part-of-speech, meaning, and context within a document or neighboring sentences.</p>
Step11: <p>In this example, lemmatization retains a bit more information than stemming. Within stemming, the Lancaster method is more aggressive than Porter and Snowball. Remember that this step allows us to reduce words to a common base form so that we can reduce our feature space and perform counting of occurrences. It will depend on your data and your application as to how much information you need to retain. </p>
Step12: Let's take a look at a sample of our stemmed tokens
Step13: In contrast, here are the same tokens in their lemmatized form
Step14: <h3>4. Vectorization </h3>
|
14,255
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Audio,Image, YouTubeVideo
YouTubeVideo('S5SG9km2f_A', height=450, width=900)
%matplotlib inline
import warnings
warnings.simplefilter('ignore')
import numpy as np
import matplotlib.pyplot as plt
import pandas
import geopandas
from pygridgen import Gridgen
from pygridtools import viz, iotools
def plotter(x, y, **kwargs):
figsize = kwargs.pop('figsize', (9, 9))
fig, ax = plt.subplots(figsize=figsize)
ax.set_aspect('equal')
viz.plot_domain(domain, betacol='beta', ax=ax)
ax.set_xlim([0, 25])
ax.set_ylim([0, 25])
return viz.plot_cells(x, y, ax=ax, **kwargs)
domain = geopandas.read_file('basic_data/domain.geojson')
fig, ax = plt.subplots(figsize=(9, 9), subplot_kw={'aspect':'equal'})
fig = viz.plot_domain(domain, betacol='beta', ax=ax)
grid = Gridgen(domain.geometry.x, domain.geometry.y,
domain.beta, shape=(50, 50), ul_idx=2)
fig_orig, artists = plotter(grid.x, grid.y)
focus, focuser_widget = iotools.interactive_grid_focus(grid, n_points=3, plotfxn=plotter)
focuser_widget
reshaped, shaper_widget = iotools.interactive_grid_shape(grid, max_n=100, plotfxn=plotter)
shaper_widget
fig_orig
import json
from pathlib import Path
from tempfile import TemporaryDirectory
with TemporaryDirectory() as td:
f = Path(td, 'widget_grid.json')
with f.open('w') as grid_write:
json.dump(grid.to_spec(), grid_write)
with f.open('r') as grid_read:
spec = json.load(grid_read)
new_grid = Gridgen.from_spec(spec)
plotter(new_grid.x, new_grid.y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Main Tutorial
Step2: Loading and plotting the boundary data
Step3: Generating a grid with pygridgen, plotting with pygridtools
Step4: Interactively manipulate the Focus
Step5: Interactively change the number of nodes in the grid
Step6: Save, load, and recreate the altered grid without widgets
|
14,256
|
<ASSISTANT_TASK:>
Python Code:
running_id = 0
output = [[0]]
with open("E:/output.txt") as file_open:
for row in file_open.read().split("\n"):
cols = row.split(",")
if cols[0] == output[-1][0]:
output[-1].append(cols[1])
output[-1].append(True)
else:
output.append(cols)
output = output[1:]
for row in output:
if len(row) == 6:
row += [datetime(2016, 5, 3, 20, 36, 8, 92165), False]
output = output[1:-1]
def convert_to_days(dt):
day_diff = dt / np.timedelta64(1, 'D')
if day_diff == 0:
return 23.0
else:
return day_diff
df = pd.DataFrame(output, columns=["id", "advert_time", "male","age","search","brand","conversion_time","event"])
df["lifetime"] = pd.to_datetime(df["conversion_time"]) - pd.to_datetime(df["advert_time"])
df["lifetime"] = df["lifetime"].apply(convert_to_days)
df["male"] = df["male"].astype(int)
df["search"] = df["search"].astype(int)
df["brand"] = df["brand"].astype(int)
df["age"] = df["age"].astype(int)
df["event"] = df["event"].astype(int)
df = df.drop('advert_time', 1)
df = df.drop('conversion_time', 1)
df = df.set_index("id")
df = df.dropna(thresh=2)
df.median()
###Parametric Bayes
#Shout out to Cam Davidson-Pilon
## Example fully worked model using toy data
## Adapted from http://blog.yhat.com/posts/estimating-user-lifetimes-with-pymc.html
## Note that we've made some corrections
N = 2500
##Generate some random data
lifetime = pm.rweibull( 2, 5, size = N )
birth = pm.runiform(0, 10, N)
censor = ((birth + lifetime) >= 10)
lifetime_ = lifetime.copy()
lifetime_[censor] = 10 - birth[censor]
alpha = pm.Uniform('alpha', 0, 20)
beta = pm.Uniform('beta', 0, 20)
@pm.observed
def survival(value=lifetime_, alpha = alpha, beta = beta ):
return sum( (1-censor)*(log( alpha/beta) + (alpha-1)*log(value/beta)) - (value/beta)**(alpha))
mcmc = pm.MCMC([alpha, beta, survival ] )
mcmc.sample(50000, 30000)
pm.Matplot.plot(mcmc)
mcmc.trace("alpha")[:]
censor = np.array(df["event"].apply(lambda x: 0 if x else 1).tolist())
alpha = pm.Uniform("alpha", 0,50)
beta = pm.Uniform("beta", 0,50)
@pm.observed
def survival(value=df["lifetime"], alpha = alpha, beta = beta ):
return sum( (1-censor)*(np.log( alpha/beta) + (alpha-1)*np.log(value/beta)) - (value/beta)**(alpha))
mcmc = pm.MCMC([alpha, beta, survival ] )
mcmc.sample(10000)
def weibull_median(alpha, beta):
return beta * ((log(2)) ** ( 1 / alpha))
plt.hist([weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))])
censor = np.array(df["event"].apply(lambda x: 0 if x else 1).tolist())
alpha = pm.Uniform("alpha", 0,50)
beta = pm.Uniform("beta", 0,50)
@pm.observed
def survival(value=df["lifetime"], alpha = alpha, beta = beta ):
return sum( (1-censor)*(np.log( alpha/beta) + (alpha-1)*np.log(value/beta)) - (value/beta)**(alpha))
mcmc = pm.MCMC([alpha, beta, survival ] )
mcmc.sample(10000, burn = 3000, thin = 20)
pm.Matplot.plot(mcmc)
#Solution to Q5
## Adjusting the priors impacts the overall result
## If we give a looser, less informative prior then we end up with a broader, shorter distribution
## If we give much more informative priors, then we get a tighter, taller distribution
censor = np.array(df["event"].apply(lambda x: 0 if x else 1).tolist())
## Note the narrowing of the prior
alpha = pm.Normal("alpha", 1.7, 10000)
beta = pm.Normal("beta", 18.5, 10000)
####Uncomment this to see the result of looser priors
## Note this ends up pretty much the same as we're already very loose
#alpha = pm.Uniform("alpha", 0, 30)
#beta = pm.Uniform("beta", 0, 30)
@pm.observed
def survival(value=df["lifetime"], alpha = alpha, beta = beta ):
return sum( (1-censor)*(np.log( alpha/beta) + (alpha-1)*np.log(value/beta)) - (value/beta)**(alpha))
mcmc = pm.MCMC([alpha, beta, survival ] )
mcmc.sample(10000, burn = 5000, thin = 20)
pm.Matplot.plot(mcmc)
#plt.hist([weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))])
#### Hypothesis testing
### Fit a cox proprtional hazards model
#### Plot baseline hazard function
#### Predict
#### Plot survival functions for different covariates
#### Plot some odds
#### BMA Coefficient values
#### Different priors
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problems
Step2: Problems
Step3: Problems
Step4: If we want to look at covariates, we need a new approach.
Step5: Once we've fit the data, we need to do something useful with it. Try to do the following things
Step6: Model selection
|
14,257
|
<ASSISTANT_TASK:>
Python Code:
%tensorflow_version 1.x
!curl -Lo deepchem_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py
import deepchem_installer
%time deepchem_installer.install(version='2.3.0')
!wget https://raw.githubusercontent.com/deepchem/deepchem/master/datasets/delaney-processed.csv
from deepchem.utils.save import load_from_disk
dataset_file= "delaney-processed.csv"
dataset = load_from_disk(dataset_file)
print("Columns of dataset: %s" % str(dataset.columns.values))
print("Number of examples in dataset: %s" % str(dataset.shape[0]))
import tempfile
from rdkit import Chem
from rdkit.Chem import Draw
from itertools import islice
from IPython.display import Image, display
def display_images(filenames):
Helper to pretty-print images.
for file in filenames:
display(Image(file))
def mols_to_pngs(mols, basename="test"):
Helper to write RDKit mols to png files.
filenames = []
for i, mol in enumerate(mols):
filename = "%s%d.png" % (basename, i)
Draw.MolToFile(mol, filename)
filenames.append(filename)
return filenames
num_to_display = 14
molecules = []
for _, data in islice(dataset.iterrows(), num_to_display):
molecules.append(Chem.MolFromSmiles(data["smiles"]))
display_images(mols_to_pngs(molecules))
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
solubilities = np.array(dataset["measured log solubility in mols per litre"])
n, bins, patches = plt.hist(solubilities, 50, facecolor='green', alpha=0.75)
plt.xlabel('Measured log-solubility in mols/liter')
plt.ylabel('Number of compounds')
plt.title(r'Histogram of solubilities')
plt.grid(True)
plt.show()
import deepchem as dc
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.CSVLoader(
tasks=["measured log solubility in mols per litre"], smiles_field="smiles",
featurizer=featurizer)
dataset = loader.featurize(dataset_file)
splitter = dc.splits.ScaffoldSplitter(dataset_file)
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(
dataset)
train_mols = [Chem.MolFromSmiles(compound)
for compound in train_dataset.ids]
display_images(mols_to_pngs(train_mols[:10], basename="train"))
valid_mols = [Chem.MolFromSmiles(compound)
for compound in valid_dataset.ids]
display_images(mols_to_pngs(valid_mols[:10], basename="valid"))
transformers = [
dc.trans.NormalizationTransformer(transform_y=True, dataset=train_dataset)]
for dataset in [train_dataset, valid_dataset, test_dataset]:
for transformer in transformers:
dataset = transformer.transform(dataset)
from sklearn.ensemble import RandomForestRegressor
sklearn_model = RandomForestRegressor(n_estimators=100)
model = dc.models.SklearnModel(sklearn_model)
model.fit(train_dataset)
from deepchem.utils.evaluate import Evaluator
metric = dc.metrics.Metric(dc.metrics.r2_score)
evaluator = Evaluator(model, valid_dataset, transformers)
r2score = evaluator.compute_model_performance([metric])
print(r2score)
def rf_model_builder(model_params, model_dir):
sklearn_model = RandomForestRegressor(**model_params)
return dc.models.SklearnModel(sklearn_model, model_dir)
params_dict = {
"n_estimators": [10, 100],
"max_features": ["auto", "sqrt", "log2", None],
}
metric = dc.metrics.Metric(dc.metrics.r2_score)
optimizer = dc.hyper.HyperparamOpt(rf_model_builder)
best_rf, best_rf_hyperparams, all_rf_results = optimizer.hyperparam_search(
params_dict, train_dataset, valid_dataset, transformers,
metric=metric)
import numpy.random
params_dict = {"learning_rate": np.power(10., np.random.uniform(-5, -3, size=1)),
"decay": np.power(10, np.random.uniform(-6, -4, size=1)),
"nb_epoch": [20] }
n_features = train_dataset.get_data_shape()[0]
def model_builder(model_params, model_dir):
model = dc.models.MultitaskRegressor(
1, n_features, layer_sizes=[1000], dropouts=[.25],
batch_size=50, **model_params)
return model
optimizer = dc.hyper.HyperparamOpt(model_builder)
best_dnn, best_dnn_hyperparams, all_dnn_results = optimizer.hyperparam_search(
params_dict, train_dataset, valid_dataset, transformers,
metric=metric)
rf_test_evaluator = Evaluator(best_rf, test_dataset, transformers)
rf_test_r2score = rf_test_evaluator.compute_model_performance([metric])
print("RF Test set R^2 %f" % (rf_test_r2score["r2_score"]))
dnn_test_evaluator = Evaluator(best_dnn, test_dataset, transformers)
dnn_test_r2score = dnn_test_evaluator.compute_model_performance([metric])
print("DNN Test set R^2 %f" % (dnn_test_r2score["r2_score"]))
task = "measured log solubility in mols per litre"
predicted_test = best_rf.predict(test_dataset)
true_test = test_dataset.y
plt.scatter(predicted_test, true_test)
plt.xlabel('Predicted log-solubility in mols/liter')
plt.ylabel('True log-solubility in mols/liter')
plt.title(r'RF- predicted vs. true log-solubilities')
plt.show()
task = "measured log solubility in mols per litre"
predicted_test = best_dnn.predict(test_dataset)
true_test = test_dataset.y
plt.scatter(predicted_test, true_test)
plt.xlabel('Predicted log-solubility in mols/liter')
plt.ylabel('True log-solubility in mols/liter')
plt.title(r'DNN predicted vs. true log-solubilities')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We need to load a dataset of estimated aqueous solubility measurements [1] into deepchem. The data is in CSV format and contains SMILES strings, predicted aqueaous solubilities, and a number of extraneous (for our purposes) molecular properties. Here is an example line from the dataset
Step4: To gain a visual understanding of compounds in our dataset, let's draw them using rdkit. We define a couple of helper functions to get started.
Step5: Now, we display some compounds from the dataset
Step6: Analyzing the distribution of solubilities shows us a nice spread of data.
Step7: With our preliminary analysis completed, we return to the original goal of constructing a predictive statistical model of molecular solubility using deepchem. The first step in creating such a molecule is translating each compound into a vectorial format that can be understood by statistical learning techniques. This process is commonly called featurization. deepchem packages a number of commonly used featurization for user convenience. In this tutorial, we will use ECPF4 fingeprints [3].
Step8: Now, let's perform the actual featurization. deepchem provides the CSVLoader class for this purpose. The featurize() method for this class loads data from disk and uses provided Featurizerinstances to transform the provided data into feature vectors.
Step9: When constructing statistical models, it's necessary to separate the provided data into train/test subsets. The train subset is used to learn the statistical model, while the test subset is used to evaluate the learned model. In practice, it's often useful to elaborate this split further and perform a train/validation/test split. The validation set is used to perform model selection. Proposed models are evaluated on the validation-set, and the best performed model is at the end tested on the test-set.
Step10: Let's visually inspect some of the molecules in the separate splits to verify that they appear structurally dissimilar. The FeaturizedSamples class provides an itersamples method that lets us obtain the underlying compounds in each split.
Step11: Notice the visual distinction between the train/validation splits. The most-common scaffolds are reserved for the train split, with the rarer scaffolds allotted to validation/test.
Step12: The next step after processing the data is to start fitting simple learning models to our data. deepchem provides a number of machine-learning model classes.
Step13: We next evaluate the model on the validation set to see its predictive power. deepchem provides the Evaluator class to facilitate this process. To evaluate the constructed model object, create a new Evaluator instance and call the compute_model_performance() method.
Step14: The performance of this basic random-forest model isn't very strong. To construct stronger models, let's attempt to optimize the hyperparameters (choices made in the model-specification) to achieve better performance. For random forests, we can tweak n_estimators which controls the number of trees in the forest, and max_features which controls the number of features to consider when performing a split. We now build a series of SklearnModels with different choices for n_estimators and max_features and evaluate performance on the validation set.
Step15: The best model achieves significantly higher $R^2$ on the validation set than the first model we constructed. Now, let's perform the same sort of hyperparameter search, but with a simple deep-network instead.
Step16: Now that we have a reasonable choice of hyperparameters, let's evaluate the performance of our best models on the test-set.
Step17: Now, let's plot the predicted $R^2$ scores versus the true $R^2$ scores for the constructed model.
|
14,258
|
<ASSISTANT_TASK:>
Python Code:
import LFPy
import MEAutility as mu
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
cellParameters = {
'morphology' : 'morphologies/L5_Mainen96_LFPy.hoc',
'tstart' : -50, # ignore startup transients
'tstop' : 100,
'dt' : 2**-4,
'v_init' : -60,
'passive' : False,
}
def instantiate_cell(cellParameters):
cell = LFPy.Cell(**cellParameters, delete_sections=True)
cell.set_pos(x=0, y=0, z=0)
cell.set_rotation(x=4.98919, y=-4.33261, z=np.pi) # Align apical dendrite with z-axis
# insert hh mechanism in everywhere, reduced density elsewhere
for sec in cell.allseclist:
sec.insert('hh')
if not 'soma' in sec.name():
# reduce density of Na- and K-channels to 5% in dendrites
sec.gnabar_hh = 0.006
sec.gkbar_hh = 0.0018
return cell
def plot_results(cell, electrode):
fig = plt.figure(figsize=(10, 6))
gs = GridSpec(2, 2)
ax = fig.add_subplot(gs[0, 1])
im = ax.pcolormesh(np.array(cell.t_ext), cell.z.mean(axis=-1), np.array(cell.v_ext),
cmap='RdBu', vmin=-100, vmax=100,
shading='auto')
ax.set_title('Applied extracellular potential')
ax.set_ylabel('z (um)', labelpad=0)
rect = np.array(ax.get_position().bounds)
rect[0] += rect[2] + 0.01
rect[2] = 0.01
cax = fig.add_axes(rect)
cbar = fig.colorbar(im, cax=cax, extend='both')
cbar.set_label('(mV)', labelpad=0)
ax = fig.add_subplot(gs[1, 1], sharex=ax)
ax.plot(cell.tvec, cell.somav, 'k')
ax.set_title('somatic voltage')
ax.set_ylabel('(mV)', labelpad=0)
ax.set_xlabel('t (ms)')
ax.set_ylim([-90, 20])
ax.set_xlim(cell.tvec[0], cell.tvec[-1])
ax = fig.add_subplot(gs[:, 0])
for sec in cell.allseclist:
idx = cell.get_idx(sec.name())
ax.plot(cell.x[idx], cell.z[idx],
color='k')
if 'soma' in sec.name():
ax.plot(cell.x[idx], cell.z[idx], color='b', lw=5)
ax.plot(electrode.x, electrode.z, marker='o', color='g', markersize=3)
ax.plot(electrode.x[stim_elec], electrode.z[stim_elec], marker='o', color='r', markersize=5)
ax.axis([-500, 500, -400, 1200])
cell = instantiate_cell(cellParameters)
probe = mu.return_mea('Neuronexus-32')
probe.rotate(axis=[0, 0, 1], theta=90)
probe.move([0, 100, 0])
amp = 20000
n_pulses = 2
interpulse = 10
width = 2
dt = cell.dt
t_stop = cell.tstop
t_start = 20
stim_elec = 15
current, t_ext = probe.set_current_pulses(el_id=stim_elec, amp1=amp, width1=width, dt=dt, t_stop=t_stop,
t_start=t_start, n_pulses=n_pulses, interpulse=interpulse)
plt.figure(figsize=(10, 6))
plt.plot(t_ext, current)
plt.title("Stimulating current")
plt.xlabel('t (ms)')
plt.ylabel('(nA)')
plt.xlim(0, cell.tstop)
electrode = LFPy.RecExtElectrode(cell=cell, probe=probe)
v_ext = cell.enable_extracellular_stimulation(electrode, t_ext=t_ext)
cell.simulate(probes=[electrode], rec_vmem=True)
plot_results(cell, electrode)
cell = instantiate_cell(cellParameters)
amp = -20000
n_pulses = 2
interpulse = 10
width = 2
dt = cell.dt
t_stop = cell.tstop
t_start = 20
stim_elec = 15
electrode = LFPy.RecExtElectrode(cell=cell, probe=probe)
current, t_ext = electrode.probe.set_current_pulses(el_id=stim_elec, amp1=amp, width1=width, dt=dt,
t_stop=t_stop, t_start=t_start, n_pulses=n_pulses,
interpulse=interpulse)
v_ext = cell.enable_extracellular_stimulation(electrode, t_ext=t_ext)
cell.simulate(probes=[electrode], rec_vmem=True)
plot_results(cell, electrode)
amp = -75000
electrode = LFPy.RecExtElectrode(cell=cell, probe=probe)
current, t_ext = electrode.probe.set_current_pulses(el_id=stim_elec, amp1=amp, width1=width, dt=dt,
t_stop=t_stop, t_start=t_start, n_pulses=n_pulses,
interpulse=interpulse)
cell = instantiate_cell(cellParameters)
v_ext = cell.enable_extracellular_stimulation(electrode, t_ext=t_ext)
cell.simulate(probes=[electrode], rec_vmem=True)
plot_results(cell, electrode)
amp = -30000
n_pulses = 1
interpulse = 10
width = 15
dt = cell.dt
t_stop = cell.tstop
t_start = 20
stim_elec = 15
electrode = LFPy.RecExtElectrode(cell=cell, probe=probe)
current, t_ext = electrode.probe.set_current_pulses(el_id=stim_elec, amp1=amp, width1=width, dt=dt,
t_stop=t_stop, t_start=t_start, n_pulses=n_pulses,
interpulse=interpulse)
cell = instantiate_cell(cellParameters)
v_ext = cell.enable_extracellular_stimulation(electrode, t_ext=t_ext)
cell.simulate(probes=[electrode], rec_vmem=True)
plot_results(cell, electrode)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create some dictionarys with parameters for cell
Step2: Create an helper function to instantiate a cell object given a set of parameters
Step3: Instantiate a LFPy.Cell object
Step4: Create an electrode using a commercially available design from Neuronexus
Step5: Rotate the probe and move it so that it is in the xz plane and 50 $\mu$m away from the soma
Step6: Create a pulse stimulation current
Step7: Create LFPy electrode object
Step8: Enable extracellular stimulation for the cell using stimulating currents of the electrode object
Step9: Run the simulation with electrode as input to cell.simulate()
Step10: Then plot the somatic potential, the extracellular field and the LFP
Step11: Positive pulses close to the soma location cause an hyperpolarization in the cell. Let's try something else!
Step12: Use the probe field in the electrode object created before to overwrite currents
Step13: Now the membrane potential is depolarizing, but stimulation is not strong enough to elicit an action potential.
Step14: Finally we got two spikes. We can maybe get the same effect with smaller currents and higher stimulation frequencies / number of pulses / pulse width. Try to increase the pulse width
|
14,259
|
<ASSISTANT_TASK:>
Python Code:
#If you haven't already, make sure you install the `dfcx-scrapi` library
!pip install dfcx-scrapi
from dfcx_scrapi.tools.copy_util import CopyUtil
creds_path = '<YOUR_CREDS_FILE>'
agent_id = '<YOUR_AGENT_ID>'
source_flow = 'Default Start Flow'
target_flow = 'My Target Flow'
cu = CopyUtil(creds_path)
flows_map = cu.flows.get_flows_map(agent_id, reverse=True)
my_pages = cu.pages.list_pages(flows_map[source_flow])
print('{} Page Count = {}'.format(source_flow, len(my_pages)))
subset_pages = [] # define a list placeholder for your Page proto objects
for page in my_pages:
if 'MyFlow -' in page.display_name:
subset_pages.append(page)
print('Total Pages to Copy = {}'.format(len(subset_pages)))
for page in subset_pages:
cu.pages.create_page(flows_map[target_flow], display_name=page.display_name)
# Step 1
subset_pages_prepped = cu.convert_from_source_page_dependencies(agent_id, subset_pages, source_flow)
# Step 2
final_pages = cu.convert_to_destination_page_dependencies(agent_id, subset_pages_prepped, target_flow)
for page in final_pages:
cu.pages.update_page(page.name, page)
print('Updated Page: {}'.format(page.display_name))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imports
Step2: User Inputs
Step3: Get Flows Map from Agent
Step4: Get All Pages from Source Flow
Step5: Extract Subset of Pages To Copy
Step6: Create Page Shells in Target Flow
Step7: Modify Page Objects
Step8: Update Pages in Target Flow
|
14,260
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
%matplotlib inline
# TODO 1: Read in the advertising.csv file and set it to a data frame called ad_data.
# TODO: Your code goes here
ad_data.head()
ad_data.info()
ad_data.describe()
ad_data.isnull().sum()
# TODO: Your code goes here
# TODO: Your code goes here
# TODO: Your code goes here
# TODO: Your code goes here
from sklearn.model_selection import train_test_split
X = ad_data[
[
"Daily Time Spent on Site",
"Age",
"Area Income",
"Daily Internet Usage",
"Male",
]
]
y = ad_data["Clicked on Ad"]
# TODO: Your code goes here
from sklearn.linear_model import LogisticRegression
logmodel = LogisticRegression()
logmodel.fit(X_train, y_train)
predictions = logmodel.predict(X_test)
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the Dataset
Step2: Check the head of ad_data
Step3: Use info and describe() on ad_data
Step4: Let's check for any null values.
Step5: Exploratory Data Analysis (EDA)
Step6: TODO 1
Step7: TODO 2
Step8: TODO 1
Step9: Logistic Regression
Step10: Next, let's define the features and label. Briefly, feature is input; label is output. This applies to both classification and regression problems.
Step11: TODO 2
Step12: Train and fit a logistic regression model on the training set.
Step13: Predictions and Evaluations
Step14: Create a classification report for the model.
|
14,261
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput

# DO NOT EDIT !
# ES-DOC notebook output target for the DWD sandbox-2 aerosol realm.
DOC = NotebookOutput('cmip6', 'dwd', 'sandbox-2', 'aerosol')

# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)

# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)

# Set publication status: 0=do not publish, 1=publish.
DOC.set_publication_status(0)

# Every CMIP6 aerosol property this document covers, in specification order.
# After each DOC.set_id(...) the author is expected to supply the
# model-specific answer via DOC.set_value(...); valid choices for the
# enumerated properties are listed in the ES-DOC CMIP6 aerosol
# specialization.
# NOTE(review): 'concentrations.prescribed_fields_mmr' appears twice below,
# mirroring the duplicated cell in the generated notebook -- confirm against
# the specialization before publishing.
_PROPERTY_IDS = (
    'cmip6.aerosol.key_properties.model_overview',
    'cmip6.aerosol.key_properties.model_name',
    'cmip6.aerosol.key_properties.scheme_scope',
    'cmip6.aerosol.key_properties.basic_approximations',
    'cmip6.aerosol.key_properties.prognostic_variables_form',
    'cmip6.aerosol.key_properties.number_of_tracers',
    'cmip6.aerosol.key_properties.family_approach',
    'cmip6.aerosol.key_properties.software_properties.repository',
    'cmip6.aerosol.key_properties.software_properties.code_version',
    'cmip6.aerosol.key_properties.software_properties.code_languages',
    'cmip6.aerosol.key_properties.timestep_framework.method',
    'cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep',
    'cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep',
    'cmip6.aerosol.key_properties.timestep_framework.integrated_timestep',
    'cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type',
    'cmip6.aerosol.key_properties.meteorological_forcings.variables_3D',
    'cmip6.aerosol.key_properties.meteorological_forcings.variables_2D',
    'cmip6.aerosol.key_properties.meteorological_forcings.frequency',
    'cmip6.aerosol.key_properties.resolution.name',
    'cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution',
    'cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints',
    'cmip6.aerosol.key_properties.resolution.number_of_vertical_levels',
    'cmip6.aerosol.key_properties.resolution.is_adaptive_grid',
    'cmip6.aerosol.key_properties.tuning_applied.description',
    'cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used',
    'cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used',
    'cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used',
    'cmip6.aerosol.transport.overview',
    'cmip6.aerosol.transport.scheme',
    'cmip6.aerosol.transport.mass_conservation_scheme',
    'cmip6.aerosol.transport.convention',
    'cmip6.aerosol.emissions.overview',
    'cmip6.aerosol.emissions.method',
    'cmip6.aerosol.emissions.sources',
    'cmip6.aerosol.emissions.prescribed_climatology',
    'cmip6.aerosol.emissions.prescribed_climatology_emitted_species',
    'cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species',
    'cmip6.aerosol.emissions.interactive_emitted_species',
    'cmip6.aerosol.emissions.other_emitted_species',
    'cmip6.aerosol.emissions.other_method_characteristics',
    'cmip6.aerosol.concentrations.overview',
    'cmip6.aerosol.concentrations.prescribed_lower_boundary',
    'cmip6.aerosol.concentrations.prescribed_upper_boundary',
    'cmip6.aerosol.concentrations.prescribed_fields_mmr',
    'cmip6.aerosol.concentrations.prescribed_fields_mmr',
    'cmip6.aerosol.optical_radiative_properties.overview',
    'cmip6.aerosol.optical_radiative_properties.absorption.black_carbon',
    'cmip6.aerosol.optical_radiative_properties.absorption.dust',
    'cmip6.aerosol.optical_radiative_properties.absorption.organics',
    'cmip6.aerosol.optical_radiative_properties.mixtures.external',
    'cmip6.aerosol.optical_radiative_properties.mixtures.internal',
    'cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule',
    'cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size',
    'cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture',
    'cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview',
    'cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands',
    'cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands',
    'cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview',
    'cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey',
    'cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn',
    'cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle',
    'cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime',
    'cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands',
    'cmip6.aerosol.model.overview',
    'cmip6.aerosol.model.processes',
    'cmip6.aerosol.model.coupling',
    'cmip6.aerosol.model.gas_phase_precursors',
    'cmip6.aerosol.model.scheme_type',
    'cmip6.aerosol.model.bulk_scheme_species',
)

for _property_id in _PROPERTY_IDS:
    # PROPERTY ID - DO NOT EDIT !
    DOC.set_id(_property_id)
    # PROPERTY VALUE: TODO - please enter value(s) via DOC.set_value(...)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
14,262
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
# NOTE(review): this shadows the builtin `str` and is never used below --
# candidate for removal.
import string as str
# Training set: feature columns plus a "Target" column.
train_data = pd.read_csv('train_data.csv',header = 0)
y = train_data["Target"]
y = y.values #convert to ndarray
# Drop the target from the features (positional axis arg: 1 = columns).
train_data = train_data.drop("Target",1)
x = train_data.values
# Prepend a column of ones so Parameters[0] acts as the intercept term.
x = np.c_[np.ones(x.shape[0]),x]
def calculateCost (HypothesisFn,y,x):
    """Half mean-squared-error cost: sum((h - y)**2) / (2m).

    `x` is accepted for signature symmetry with derivativeOf but is not
    needed to evaluate the cost.
    """
    residuals = HypothesisFn - y
    return np.mean(residuals ** 2) / 2
def derivativeOf(HypothesisFn,y,x):
    """Gradient of calculateCost w.r.t. the parameters: x.T @ (h - y) / m."""
    m = y.shape[0]
    gradient = np.dot(x.transpose(), HypothesisFn - y)
    gradient /= m
    return gradient
Parameters = np.zeros(5) #initialize parameters
HypothesisFn = np.dot(x,Parameters)
LearningRate = 0.1
# Batch gradient descent until the cost drops below 1e-25.
# NOTE(review): if the data cannot be fit this tightly (or the step size
# diverges) this loop never terminates -- consider a max-iteration cap.
while True:
    Parameters = Parameters - LearningRate*derivativeOf(HypothesisFn,y,x)
    HypothesisFn = np.dot(x,Parameters)
    cost = calculateCost (HypothesisFn,y,x)
    if ( cost <= 10**(-25)):
        break
    # 10^-25 seems pretty negligible. = ¯\_(ツ)_/¯
# Print the fitted linear hypothesis.  The trailing commas are Python 2
# print-continuation leftovers; under Python 3 they just build and discard
# a one-element tuple.
print ("\nThe Hypothesis function is {0}".format(Parameters[0])),
for i in range(1,Parameters.size):
    print (" + {0}x{1}".format(Parameters[i],i)),
# Apply the fitted parameters to unseen data and save the predictions.
test_input = pd.read_csv('test_input.csv',header = 0)
test_input = test_input.values
# Same bias-column trick as for training.
test_input = np.c_[np.ones(test_input.shape[0]),test_input]
prediction = np.dot (test_input, Parameters)
# Drop the bias column again and append the predictions as a new column.
test_input = np.delete(test_input, (0), axis=1)
test_input = np.c_[test_input,prediction]
output = pd.DataFrame(data=test_input, columns = ["Variable 1","Variable 2","Variable 3","Variable 4","Prediction"])
output.to_csv('test_output.csv', index=False, header=True, sep=',')
# BUG FIX: `print output` is Python 2 statement syntax (a SyntaxError on
# Python 3); the call form below works on both 2 and 3.
print(output) #Boom shakalaka
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: x ==> Array of Feature Values
Step2: If anyone's wondering why I didn't transpose my Parameters array, one dimensional arrays do not need to be transposed to be multiplied. np.dot figures it out on its own.
|
14,263
|
<ASSISTANT_TASK:>
Python Code:
# FRETBursts burst analysis of a multi-spot smFRET measurement.
from fretbursts import *
sns = init_notebook()
# Record dependency versions for reproducibility.
import lmfit; lmfit.__version__
import phconvert; phconvert.__version__
# Fetch the sample Photon-HDF5 file (cached under ./data).
url = 'http://files.figshare.com/2182604/12d_New_30p_320mW_steer_3.hdf5'
download_file(url, save_dir='./data')
filename = "./data/12d_New_30p_320mW_steer_3.hdf5"
import os
assert os.path.exists(filename)
# Load photon data and set correction coefficients.
d = loader.photon_hdf5(filename)
d.leakage = 0.038
d.gamma = 0.43
# Background estimation on 30 s windows, then burst search.
d.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto', F_bg=1.7)
d.burst_search(L=10, m=10, F=7)
# Diagnostics: per-channel background, waiting-time histograms, timetraces.
mch_plot_bg(d)
dplot(d, hist_bg);
dplot(d, timetrace_bg);
dplot(d, timetrace)
xlim(2, 3); ylim(-100, 100);
# Uncomment for an interactive, scrollable timetrace window:
#%matplotlib qt
#dplot(d, timetrace, scroll=True);
#ylim(-100, 100)
#%matplotlib inline
# Stash the gamma factor, then disable it on `d` so burst sizes are raw.
gamma = d.gamma
gamma
d.gamma = 1
# Burst selection by size, with and without gamma correction.
ds = d.select_bursts(select_bursts.size, th1=30, gamma=1)
dplot(ds, hist_fret);
ds = d.select_bursts(select_bursts.size, th1=25, gamma=gamma, donor_ref=False)
dplot(ds, hist_fret);
ds = d.select_bursts(select_bursts.size, th1=25, gamma=gamma)
dplot(ds, hist_fret, weights='size', gamma=gamma);
dplot(ds, scatter_fret_nd_na); ylim(0,200);
ds.gamma = 1.
# Two-Gaussian least-squares fit of the FRET histogram (per channel).
bext.bursts_fitter(ds, weights=None)
ds.E_fitter.fit_histogram(mfit.factory_two_gaussians(), verbose=False)
ds.E_fitter.params
dplot(ds, hist_fret, weights=None, show_model=True,
      show_fit_stats=True, fit_from='p2_center');
# bl.two_gaussian_fit_EM??
# Expectation-Maximization fit of the same two-Gaussian model.
EM_results = ds.fit_E_two_gauss_EM(weights=None, gamma=1.)
EM_results
ds.fit_E_name, ds.fit_E_res
ds.fit_E_model
# Overlay EM peak positions and model curves on the per-channel histograms.
AX = dplot(ds, hist_fret, weights=None)
x = np.r_[-0.2: 1.2 : 0.01]
for ich, (ax, E_fit) in enumerate(zip(AX.ravel(), EM_results)):
    ax.axvline(E_fit, ls='--', color='r')
    ax.plot(x, ds.fit_E_model(x, ds.fit_E_res[ich]))
print('E mean: %.2f%% E delta: %.2f%%' %\
      (EM_results.mean()*100, (EM_results.max() - EM_results.min())*100))
# Compare EM vs least-squares parameters (values in percent).
import pandas as pd
EM_results = pd.DataFrame(ds.fit_E_res, columns=['p1_center', 'p1_sigma', 'p2_center', 'p2_sigma', 'p1_amplitude'])
EM_results * 100
ds.E_fitter.params * 100
(ds.E_fitter.params - EM_results) * 100
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Downloading the sample data file
Step2: Selecting a data file
Step3: Data load and Burst search
Step4: For convenience we can set the correction coefficients right away
Step5: NOTE
Step6: Perform a background plot as a function of the channel
Step7: Let's take a look at the photon waiting times histograms and at the fitted background rates
Step8: Using dplot exactly in the same way as for the single-spot
Step9: We can look at the timetrace of the photon stream (binning)
Step10: We can also open the same plot in an interactive window that allows scrolling (uncomment the following lines)
Step11: Burst selection and FRET
Step12: FRET Fitting
Step13: The fitted parameters are stored in a pandas DataFrame
Step14: Weighted Expectation Maximization
Step15: The fitted parameters for each channel are stored in the fit_E_res attribute
Step16: The model function is stored in
Step17: Let's plot the histogram and the model with parameters from the EM fit
Step18: Comparing 2-Gaussian and EM fit
Step19: And we compute the difference between the two sets of parameters
|
14,264
|
<ASSISTANT_TASK:>
Python Code:
from cave.cavefacade import CAVE
# Alternative: analyze a BOHB run instead of SMAC3 output.
#cave = CAVE(["workflow-result"], "test_jupyter", ["."], file_format='BOHB')
# Build the CAVE analyzer from two SMAC3 runs; reports land in ./cave_on_jupyter.
cave = CAVE(folders=["./smac3/example_output/run_1", "./smac3/example_output/run_2"],
            output_dir="cave_on_jupyter",
            ta_exec_dir=["./smac3"],
            verbose_level="OFF"
           )
# General information tables.
cave.overview_table()
cave.compare_default_incumbent()
# Performance analysis plots/tables.
cave.performance_table();
cave.plot_scatter()
cave.plot_ecdf()
cave.algorithm_footprints()
cave.configurator_footprint()
# Parameter/feature importance analyses.
cave.feature_importance()
cave.pimp_forward_selection()
cave.cave_ablation()
cave.cave_fanova()
cave.pimp_comparison_table()
# NOTE(review): algorithm_footprints() was already generated above --
# this second call looks like a duplicate.
cave.algorithm_footprints()
cave.cost_over_time()
cave.parallel_coordinates()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can use the CAVE-object to generate general information in tables
Step2: Performance Analysis
Step3: Only available for instances, CAVE can provide scatter and cumulative distribution plots (using an EPM to estimate the performance of config-instance pairs that were not actually executed). Individual plots will be generated for train and test instances (if a distinction is made).
Step4: A special feature of CAVE is the analysis of algorithm footprints. Instances are mapped on a 2d-plot and marked blue if the selected algorithm is better than the other one. Use the tools on the right side to navigate within the plot.
Step5: But what is really interesting
|
14,265
|
<ASSISTANT_TASK:>
Python Code:
# %reload_ext autoreload
# %autoreload 2
%matplotlib inline
import torch
import torchvision
import numpy as np
# import mnist_loader
# train, valid, test = mnist_loader.load_data(path='data/mnist/')
# torchvision datasets are PIL.Image images of range [0,1]. Must trsfm them
# to Tensors of normalized range [-1,1]
transform = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))]
)
# I have no idea how this works
trainset = torchvision.datasets.MNIST(root='./data/mnist/', train=True,
download=True, transform=transform)
testset = torchvision.datasets.MNIST(root='./data/mnnist/', train=False,
download=False, transform=transform)
# ^ already downloaded w/ trainset; doesnt need to be `download=True`
trainloader = torch.utils.data.DataLoader(trainset, batch_size=32,
shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=32,
shuffle=False, num_workers=4)
classes = tuple(str(i+1) for i in range(10))
classes
class FullNet(torch.nn.Module):
    """Simple fully-connected classifier.

    `layers` lists the width of every layer, input first -- e.g.
    [784, 40, 10] for flattened MNIST.  Hidden layers use ReLU; the final
    layer's raw output feeds log-softmax, so pair with NLLLoss (or drop the
    log-softmax and use CrossEntropyLoss on logits).
    """

    def __init__(self, layers):
        super().__init__()
        self.layers = torch.nn.ModuleList([
            torch.nn.Linear(layers[i], layers[i+1]) for i in range(len(layers) - 1)
        ])

    def forward(self, x):
        # Flatten (N, C, H, W) images to (N, C*H*W).
        x = x.view(x.size(0), -1)
        # FIX: the old loop applied ReLU to every layer including the last and
        # then discarded that result -- wasted compute and confusing to read.
        # Apply ReLU to hidden layers only.
        for layer in self.layers[:-1]:
            x = torch.nn.functional.relu(layer(x))
        logits = self.layers[-1](x)  # no activation on the output layer
        return torch.nn.functional.log_softmax(logits, dim=-1)
layers = [1*28*28, 40, 10] # MNIST dims: torch.Size([1, 28, 28]
network = FullNet(layers)
network
# Parameter counts per weight/bias tensor.
[param.numel() for param in network.parameters()]
# loss function
# NOTE(review): FullNet.forward already returns log-probabilities
# (log_softmax), and CrossEntropyLoss applies log_softmax internally, so it
# is double-applied here.  NLLLoss would be the matching criterion.
criterion = torch.nn.CrossEntropyLoss()
# backprop optimizer
optimizer = torch.optim.SGD(network.parameters(), lr=1e-2, momentum=0.9)
# Missing () -- this displays the bound method, not the parameters.
network.parameters
trainloader
# I don't know how to get just one minibatch out of trainloader but this works
# (it iterates the whole loader just to keep batch 0; next(iter(trainloader))
# would grab one batch directly).
for i, data in enumerate(trainloader):
    if i == 0:
        inp, lab = data
# Sanity-check one forward pass and the loss on that single batch.
inp, lab = (torch.autograd.Variable(x) for x in (inp,lab))
outp = network(inp)
loss = criterion(outp, lab)
# loss.data is the underlying tensor; loss.data[0] extracts the scalar
# (pre-0.4 PyTorch idiom; newer versions use loss.item()).
for elm in (loss.data, loss.data[0]): print(elm)
# some help from:
# http://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#train-the-network
# Train for 2 epochs over the full training loader.
for epoch in range(2):
    running_loss = 0.0
    for i, data in enumerate(trainloader, start=0):
        # get inputs & wrap them in Variable
        inputs, labels = data
        inputs, labels = torch.autograd.Variable(inputs), torch.autograd.Variable(labels)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = network(inputs)          # forward computation step
        loss = criterion(outputs, labels)  # loss calculation
        loss.backward()                    # backprop (gradient calculation)
        optimizer.step()                   # SGD step: update weights

        # print statistics
        # NOTE(review): loss.data[0] is the pre-0.4 PyTorch scalar idiom;
        # newer versions require loss.item().
        running_loss += loss.data[0]
        if i % 200 == 199:  # print every 200 mini-batches
            # BUG FIX: average over the 200 batches actually accumulated
            # (was /2000), and reset the accumulator so each report covers
            # only its own window instead of growing forever.
            print(f'{[epoch+1, i+1]} loss: {running_loss/200}')
            running_loss = 0.0
print(f'Training Loop Complete')
# help from:
# http://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#test-the-network-on-the-test-data
# Grab a single test batch by wrapping the DataLoader in an iterator.
dataiter = iter(testloader)
# BUG FIX: `dataiter.next()` is Python-2-only iterator syntax; the builtin
# next() works on both Python 2 and 3.
images, labels = next(dataiter)
# images
def imshow(img):
    """Un-normalize a (C, H, W) tensor image and display it with matplotlib.

    Reverses the Normalize(0.5, 0.5) transform, converts the tensor to a
    numpy array, and reorders axes to the (H, W, C) layout pyplot expects.
    """
    # BUG FIX: the old try/except NameError conditional import never worked
    # as intended: `import ... as plt` inside the function made `plt`
    # function-local, so the first reference always raised
    # (UnboundLocalError is a NameError) and the module was re-imported on
    # every call.  Import it up front instead.
    import matplotlib.pyplot as plt
    img = img / 2 + 0.5  # unnormalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
# Show the first test batch as an image grid, with labels in the same 4x8 layout.
imshow(torchvision.utils.make_grid(images))
for i in range(len(labels)//8):
    print(' ' + ' '.join(f'{labels[i*8 + j]:3d}' for j in range(8)))
# print(f'Ground Truth: '.join(f'{classes[labels[j]]:5s}' for j in range(4)))
# original (wrong stride -- rows overlap)
[[labels[i+j] for j in range(8)] for i in range(4)]
# corrected
[[labels[i*8 + j] for j in range(8)] for i in range(4)]
# Inspect a single image: unnormalize, to numpy, CxHxW -> HxWxC.
img = images[8]
img = img / 2 + 0.5
img = img.numpy()
img = np.transpose(img, (1,2,0))
img.shape
print(img[:,:,0].shape)
plt.imshow(img[:,:,0],cmap='gray');
labels[8]
# Grid of the whole batch; make_grid returns a 3-channel CxHxW tensor.
img = torchvision.utils.make_grid(images)
img = img / 2 + 0.5
img = img.numpy() # (3, 122, 242)
# plt.imshow(np.transpose(img, (1, 2, 0))) # (122, 242, 3)
# plt.imshow(np.transpose(img, (0, 1, 2))) # (3, 122, 242) ## invalid dims
# plt.imshow(np.transpose(img, (0, 1, 2))[0]) # (122, 242) ## this works instead
# So, plt needs HxWxC, but will work with CxHxW iff C=1
# Our images from DataLoader are CxHxW
# Can use img = np.transpose(img, (1,2,0)) to convert CxHxW --> HxWxC
img = images[-1]
img = torchvision.utils.make_grid(img)
print(img.shape)
print(np.transpose(img, (1,2,0)).shape)
img = images[-1]
img = img / 2 + 0.5
img = img.numpy()
# img = np.transpose(img, (1,2,0))
# img.shape
plt.imshow(img[0], cmap='gray') # see: https://matplotlib.org/examples/color/colormaps_reference.html
print(img.shape)
# test run on first minibatch:
dataiter = iter(testloader)
# BUG FIX: use the builtin next() -- iterator.next() is Python-2-only.
images, labels = next(dataiter)
outputs = network(torch.autograd.Variable(images))
# btw, it seems getting further into iterators is where itertools comes in:
# https://stackoverflow.com/a/3267069
outputs.shape
_, predicted = torch.max(outputs.data, 1)
# output tensor is 32x10. That's 32 rows of values in 10 columns.
# torch.max(outputs.data, 1) specifies finding maximum value along axis-1 (ie: columnwise)
# torch.max(..) returns: (value, index)
imshow(torchvision.utils.make_grid(images))
# accuracy on this one minibatch:
print(sum(labels == predicted)/len(labels)) # sum of torch.ByteTensor size 32
# predictions laid out to match the 4x8 image grid above:
[[predicted[i*8 + j] for j in range(8)] for i in range(4)]
# Evaluate classification accuracy over the full test set.
correct = 0
total = 0
for datum in testloader:
    images, labels = datum
    outputs = network(torch.autograd.Variable(images))
    # Argmax over the 10 class scores per row.
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0) # labels.size() --> torch.Size([size])
    correct += (predicted == labels).sum() # method of torch.ByteTensor
print(f'MNIST dataset accuracy: {100*np.round(correct/total, 4)}%\n{total} images.')
# Load hand-drawn digit images from disk and massage them into MNIST form.
import cv2
import os
flist = os.listdir('data/digits/')
flist = ['data/digits/' + f for f in flist]
# digits.png is excluded from the sample list -- presumably not a
# single-digit sample; confirm.
flist.remove('data/digits/digits.png')
flist
# Label is encoded in the filename after the last '-', before the extension.
custom_labels = [f.split('-')[-1].split('.')[0] for f in flist]
# remove 'a' & 'b'
for i,elm in enumerate(custom_labels):
    if len(elm) > 1:
        custom_labels[i] = custom_labels[i][0]
custom_labels = [int(λ) for λ in custom_labels]
# labels should be of type: [torch.LongTensor of size 16] size acc. to num labels
# NOTE(review): torch.Tensor() produces a FloatTensor, not the LongTensor the
# comment above asks for -- verify before using these as classifier targets.
custom_labels = torch.Tensor(custom_labels) # I think this'll do
images = [cv2.imread(f) for f in flist]
# reference for what datatype images will be:
# [torch.FloatTensor of size 16x1x28x28] --- last minibatch: 10000 % 32 = 16
# array of images from disk (re-read; identical to the load above)
images = [cv2.imread(f) for f in flist]
# resize images to 28x28
images = [cv2.resize(img, (28,28), interpolation=cv2.INTER_AREA) for img in images]
# convert 3-channel png to Grayscale (opencv loads as bgr)
images = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in images]
# invert grayscale (to be in line w/ MNIST data)
images = [255 - img for img in images]
# convert HxW dims to HxWxC for PyTorch usage later
images = [np.expand_dims(img, -1) for img in images]
## NOTE: need remove last dim to view in pyplot.imshow via: images[idx][:,:,0]
## another way to do resizings
# get transformation foreach image (x,y : c,r)
# trsfs = [[28/img.shape[1], 28/img.shape[0]] for img in images]
# img = cv2.resize(img, None, fx=28/img.shape[1], fy=28/img.shape[0], interpolation=cv2.INTER_AREA)
## create PyTorch dataset class
# from: http://pytorch.org/tutorials/beginner/data_loading_tutorial.html#dataset-class
class custom_dataset(torch.utils.data.Dataset):
    """Minimal map-style Dataset over pre-zipped (image, label) pairs.

    Parameters
    ----------
    data_array : sequence of (image, label) tuples
        Going to want to put inputs & labels in as zipped tuples.
    transform : callable, optional
        Applied to the image (only) on every __getitem__ call.
    """

    def __init__(self, data_array, transform=None):
        self.data = data_array
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return the (image, label) tuple for sample `idx`."""
        # NOTE: the original had this docstring as a bare (un-quoted) line,
        # which was a SyntaxError.
        image = self.data[idx][0]
        label = self.data[idx][1]
        if self.transform:
            # transform builds a new object; self.data[idx][0] is not mutated
            image = self.transform(image)
        # A dict {'image': ..., 'label': ...} would also work, but the built-in
        # torchvision MNIST dataset yields plain tuples, so we match that.
        return (image, label)
## create dataloader
transform = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))]
)
custom_data = custom_dataset(list(zip(images, custom_labels)), transform=transform)
custom_dataloader = torch.utils.data.DataLoader(custom_data, batch_size=32,
shuffle=False, num_workers=4)
## create batch-iterator of dataloader
custom_dataiter = iter(custom_dataloader)
## Test Network on Data
indata, labels = custom_dataiter.next()
indata = torch.autograd.Variable(indata)
outputs = network(indata)
_, predictions = torch.max(outputs, 1)
print('Y* Y\'\n------')
correct = 0
for elem in zip(labels, predictions.data):
correct += elem[0]==elem[1]
print(int(elem[0]), ' ', elem[1])
print('------')
total = len(labels)
print(f'A: {100*np.round((correct/total), 4)}%')
print('Y* Y\'\n------')
correct = 0
for elem in zip(labels, predictions.data):
correct += elem[0]==elem[1]
print(int(elem[0]), ' ', elem[1])
print('------')
total = len(labels)
print(f'A: {100*np.round((correct/total), 4)}%')
class ConvNet(torch.nn.Module):
    """Stack of stride-2 3x3 conv layers followed by a linear classifier.

    `layers` lists the channel counts (input channels first); `c` is the
    number of output classes.  forward() returns log-probabilities.
    """

    def __init__(self, layers, c):
        super().__init__()
        # one Conv2d per consecutive (in_channels, out_channels) pair
        conv_stack = [
            torch.nn.Conv2d(n_in, n_out, kernel_size=3, stride=2)
            for n_in, n_out in zip(layers, layers[1:])
        ]
        self.layers = torch.nn.ModuleList(conv_stack)
        # an AdaptiveMaxPool2d(1) module would be an alternative pooling choice
        self.out = torch.nn.Linear(layers[-1], c)

    def forward(self, x):
        for conv in self.layers:
            x = conv(x)
        # global average pool down to 1x1, then flatten to (batch, channels)
        x = torch.nn.functional.adaptive_avg_pool2d(x, 1)
        x = x.view(x.size(0), -1)
        return torch.nn.functional.log_softmax(self.out(x), dim=-1)
class ConvNetMod(torch.nn.Module):
    """'Modern' ConvNet using a large-field input conv layer.

    A 5x5 stride-1 padded conv maps the 1-channel input to ``layers[0]``
    channels, followed by a stack of stride-2 3x3 convs and a linear
    classifier over ``c`` classes.  forward() returns log-probabilities.

    Note: the original had this docstring as a bare (un-quoted) line,
    which was a SyntaxError.
    """

    def __init__(self, layers, c):
        super().__init__()
        # 'modern' large-field input Conv layer.  Its out_channels must equal
        # the conv block's first in_channels, so derive it from layers[0]
        # instead of hard-coding 10 (the original only worked for layers[0]==10).
        self.conv1 = torch.nn.Conv2d(1, layers[0], kernel_size=5, stride=1, padding=2)
        # Conv block: one stride-2 3x3 conv per consecutive channel pair
        self.layers = torch.nn.ModuleList([
            torch.nn.Conv2d(layers[i], layers[i + 1], kernel_size=3, stride=2)
            for i in range(len(layers) - 1)
        ])
        # output classification Linear layer
        self.out = torch.nn.Linear(layers[-1], c)

    def forward(self, x):
        x = self.conv1(x)
        for layer in self.layers:  # conv layers in conv block
            x = layer(x)
        x = torch.nn.functional.adaptive_max_pool2d(x, 1)  # pool after conv block
        x = x.view(x.size(0), -1)  # flatten final feature map to a vector
        return torch.nn.functional.log_softmax(self.out(x), dim=-1)
layers = [1, 20, 40, 80] # this may be too much -- filter size by last layer is ..? 7? 3.5?
c = 10
convnetwork = ConvNet(layers, c)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(convnetwork.parameters(), lr=0.01, momentum=0.9)
# Standard supervised training loop for `convnetwork`: forward, loss,
# backward, optimizer step, with a running-loss report every 200 minibatches.
num_epochs = 2
for epoch in range(num_epochs):
    running_loss = 0.0
    for i, datum in enumerate(trainloader):
        inputs, labels = datum
        # wrap in Variable (pre-0.4 PyTorch autograd API)
        inputs, labels = torch.autograd.Variable(inputs), torch.autograd.Variable(labels)
        optimizer.zero_grad()  # clear gradients accumulated by the previous step
        outputs = convnetwork(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print(f'minibatch no. {i+1} Loss: {loss.data[0]/(i+1)}')
        running_loss += loss.data[0]  # .data[0] is the old-API scalar extraction (now .item())
        if i % 200 == 199: # print every 200 mini-batches
            print(f'{[epoch+1, i+1]} loss: {running_loss/200}')
            running_loss = 0.0
print(f'Training Loop Complete')
num_epochs = 2
for epoch in range(num_epochs):
running_loss = 0.0
for i, datum in enumerate(trainloader):
inputs, labels = datum
inputs, labels = torch.autograd.Variable(inputs), torch.autograd.Variable(labels)
optimizer.zero_grad()
outputs = convnetwork(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print(f'minibatch no. {i+1} Loss: {loss.data[0]/(i+1)}')
running_loss += loss.data[0]
if i % 200 == 199: # print every 200 mini-batches
print(f'{[epoch+1, i+1]} loss: {running_loss/200}')
running_loss = 0.0
print(f'Training Loop Complete')
optimizer = torch.optim.SGD(convnetwork.parameters(), lr=0.005, momentum=0.9)
num_epochs = 2
for epoch in range(num_epochs):
running_loss = 0.0
for i, datum in enumerate(trainloader):
inputs, labels = datum
inputs, labels = torch.autograd.Variable(inputs), torch.autograd.Variable(labels)
optimizer.zero_grad()
outputs = convnetwork(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print(f'minibatch no. {i+1} Loss: {loss.data[0]/(i+1)}')
running_loss += loss.data[0]
if i % 200 == 199: # print every 200 mini-batches
print(f'{[epoch+1, i+1]} loss: {running_loss/200}')
running_loss = 0.0
print(f'Training Loop Complete')
layers = [10, 20, 40, 80] # this may be too much -- filter size by last layer is ..? 7? 3.5?
c = 10
convnetworkmod = ConvNetMod(layers, c)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(convnetworkmod.parameters(), lr=0.01, momentum=0.9)
num_epochs = 2
for epoch in range(num_epochs):
running_loss = 0.0
for i, datum in enumerate(trainloader):
inputs, labels = datum
inputs, labels = torch.autograd.Variable(inputs), torch.autograd.Variable(labels)
optimizer.zero_grad()
outputs = convnetworkmod(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print(f'minibatch no. {i+1} Loss: {loss.data[0]/(i+1)}')
running_loss += loss.data[0]
if i % 200 == 199: # print every 200 mini-batches
print(f'{[epoch+1, i+1]} loss: {running_loss/200}')
running_loss = 0.0
print(f'Training Loop Complete')
correct = 0
total = 0
for datum in testloader:
inputs, labels = datum
inputs = torch.autograd.Variable(inputs)
outputs = convnetwork(inputs)
_, predictions = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (labels == predictions).sum()
print(f'MNIST dataset accuracy: {100*np.round(correct/total, 4)}%\n{total} images.')
labels.size(0)
torch.autograd.Variable(labels).size(0)
len(labels)
len(torch.autograd.Variable(labels))
## NOTE: make sure the custom_dataloader from §8 is in memory
## create batch-iterator of dataloader
custom_dataiter = iter(custom_dataloader)
indata, labels = custom_dataiter.next()
indata = torch.autograd.Variable(indata)
outputs = convnetwork(indata)
_, predictions = torch.max(outputs, 1)
print('Y* Y\'\n------')
correct = 0
for elem in zip(labels, predictions.data):
correct += elem[0]==elem[1]
print(int(elem[0]), ' ', elem[1])
print('------')
total = len(labels)
print(f'A: {100*np.round((correct/total), 4)}%')
custom_dataiter = iter(custom_dataloader)
indata, labels = custom_dataiter.next()
indata = torch.autograd.Variable(indata)
outputs = convnetworkmod(indata)
_, predictions = torch.max(outputs, 1)
print('Y* Y\'\n------')
correct = 0
for elem in zip(labels, predictions.data):
correct += elem[0]==elem[1]
print(int(elem[0]), ' ', elem[1])
print('------')
total = len(labels)
print(f'A: {100*np.round((correct/total), 4)}%')
def viewimg(image_array, idx):
    """Display image `idx` from `image_array` in grayscale via pyplot.

    Assumes each entry is an HxWx1 array (trailing channel axis added
    earlier with np.expand_dims) -- TODO confirm for other callers.
    """
    image = image_array[idx]
    image = image[:,:,0]  # drop the singleton channel axis: HxWx1 -> HxW
    image = image/2 + 0.5  # matches the un-normalize idiom (img/2 + 0.5) used elsewhere in this notebook
    image = 255 - image
    # NOTE(review): subtracting from 255 after the /2 + 0.5 shift mixes a
    # [0,255]-style inversion with a [0,1]-style rescale -- confirm the
    # intended value range; imshow auto-scales, so it still renders.
    plt.imshow(image, cmap='gray')
idx =0
viewimg(images, idx), print(f'Convnet\'s prediction: {predictions.data[idx]}')
idx =1
viewimg(images, idx), print(f'Convnet\'s prediction: {predictions.data[idx]}')
idx =4
viewimg(images, idx), print(f'Convnet\'s prediction: {predictions.data[idx]}')
idx =6
viewimg(images, idx), print(f'Convnet\'s prediction: {predictions.data[idx]}')
idx =20
viewimg(images, idx), print(f'Convnet\'s prediction: {predictions.data[idx]}')
idx =15
viewimg(images, idx), print(f'Convnet\'s prediction: {predictions.data[idx]}')
idx =19
viewimg(images, idx), print(f'Convnet\'s prediction: {predictions.data[idx]}')
idx =8
viewimg(images, idx), print(f'Convnet\'s prediction: {predictions.data[idx]}')
# custom_dataiter.next()
# dataiter = iter(testloader)
# dataiter.next()
custom_dataiter.next()
img = images[0]
torch.from_numpy(img.transpose((2,0,1)))
np.expand_dims(img, -1).shape
torch.from_numpy(np.expand_dims(img,-1).transpose((2,0,1)))
test
# custom_data.data[0]
custom_dataiter = iter(custom_data)
custom_dataiter.next()
type(custom_data.data[0][0])
a = [i for i in range(6)]
b = [chr(ord('a')+i) for i in range(6)]
c = list(zip(a,b))
c
ord('a')
torchvision.datasets.mnist??
trainset
# trainset[0]
# torchvision datasets are PIL.Image images of range [0,1]. Must trsfm them
# to Tensors of normalized range [-1,1]
transform = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))]
)
# I have no idea how this works
trainset = torchvision.datasets.MNIST(root='./data/mnist/', train=True,
download=True, transform=transform)
testset = torchvision.datasets.MNIST(root='./data/mnnist/', train=False,
download=False, transform=transform)
# ^ already downloaded w/ trainset; doesnt need to be `download=True`
trainloader = torch.utils.data.DataLoader(trainset, batch_size=32,
shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=32,
shuffle=False, num_workers=4)
images[0].dtype
img = images[0]
img = np.expand_dims(img, -1)
img.shape
# compatibility for torchvision.transforms.Compose() -- expects NumPy ndarrays to be of
# shape: HxWxC as acc to error:
# 46 if isinstance(pic, np.ndarray):
# 47 # handle numpy array
# ---> 48 img = torch.from_numpy(pic.transpose((2, 0, 1))
images = [np.expand_dims(img, -1) for img in images] # cvt (28,28) --> (28,28,1)
images = [transform(img) for img in images]
custom_loader = torch.utils.data.dataloader.DataLoader((images,custom_labels),
batch_size=32,shuffle=False, num_workers=4)
# testset.test_data
images[0]
img = images[0]
img = img / 2 + 0.5
img = img.numpy()
# img = np.transpose(img, (1,2,0))
plt.imshow(img[0], cmap='gray')
y = network(torch.autograd.Variable(images[0]))
y
torch.max(y, 1)
dataiter = iter(testloader)
images, labels = dataiter.next()
x = dataiter.next()
# x[0][0]
testloader
testset
# custom_labels
custom_labels[0]
custom_dataset =
for i, layer in enumerate(layers, start=0): # 2nd arg is index start count
print(i, layer)
len(trainloader)
trainloader.batch_size
trainloader.dataset[0]
trainloader.dataset[0][0]
trainloader.dataset[1][0]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Data Loading
Step2: 2. Network Definition
Step3: 3. Loss Function & Optimizer Definitions
Step4: 4. Training
Step5:
Step6: NOTE
Step7: okay, that's actually so cool.
Step8: Making sure the images match up with their corresponding labels
Step9: Aside
Step10: Having trouble now with 1 channel images in pyplot. 3 channel HxWxC is fine for 1 channel, HxW is fine, but both CxHxW and HxWxC fail if C = 1.
Step11: Sanity Check
Step12: And the images object being wrapped as a pytorch Variable & sent into the network is itself a torch.FloatTensor -- to answer the question of how do I get data into a PyTorch neuralnet again?
Step13: $93.75\%$ accuracy, though this is a notoriously easy dataset.
Step15: Awesome.
Step16: And here we see an immediate jump to ~81% accuracy on completely new images, thanks to preprocessing -- to make the images look like those the network trained on.
Step17: Predictions on images without preprocessing
Step19: Well that was super cool. The Linear/Fully-Connected/Dense Neural Net on its own is very limited. It can't recognize spatially-correlated features. Maybe it can, but its ability to do so is ephemeral and a byproduct at best. I can do better with a simple ConvNet.
Step20: 9.1 Train ConvNet & ConvNetMod
Step21: That loss is about 1/3 what I got with the same training regimen on a Linear network.
Step22: ConvNetMod
Step23: Wow, nevermind. Introducing the large-receptive-field conv layer in the beginning really helped with the loss.
Step24: technically less accurate, but I know it has a lot more potential than the other method, and will be more resilient.
Step25: 9.3 Run ConvNets on custom data;
Step26: ConvNet after 4 epochs at lr=0.01 and 2 epochs at lr=0.05
Step27: ConvNetMod after 2 epochs of training
Step28: Aside
Step29: Although I wonder if I should have preprocessed my written digits to be color inverted. I see a lot of -1's in the PT MNIST data, and +1's in my own. They're both normalized in range $[-1,1]$, making me think that's the case. Luckily it's a simple NumPy or OpenCV operation away, and can just be done in the initial image-loading phase.
Step30: Okay, that looks like my problem. The images inside ... ahhhh... okay. Far as I can tell, here's what's up
Step31: Misc
|
14,266
|
<ASSISTANT_TASK:>
Python Code:
# import math lib
from math import pi
# import Qiskit
from qiskit import Aer, IBMQ, execute
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
# import basic plot tools
from qiskit.tools.visualization import plot_histogram
# To use local qasm simulator
backend = Aer.get_backend('qasm_simulator')
theta_list = [0.01, 0.02, 0.03, 0.04, 0.05, 1.31, 1.32, 1.33, 1.34, 1.35]
# create Quantum Register called "qr" with 5 qubits
qr = QuantumRegister(5, name="qr")
# create Classical Register called "cr" with 5 bits
cr = ClassicalRegister(5, name="cr")
# Creating Quantum Circuit called "qc" involving your Quantum Register "qr"
# and your Classical Register "cr"
qc = QuantumCircuit(qr, cr, name="k_means")
#Define a loop to compute the distance between each pair of points
for i in range(9):
for j in range(1,10-i):
# Set the parament theta about different point
theta_1 = theta_list[i]
theta_2 = theta_list[i+j]
#Achieve the quantum circuit via qiskit
qc.h(qr[2])
qc.h(qr[1])
qc.h(qr[4])
qc.u3(theta_1, pi, pi, qr[1])
qc.u3(theta_2, pi, pi, qr[4])
qc.cswap(qr[2], qr[1], qr[4])
qc.h(qr[2])
qc.measure(qr[2], cr[2])
qc.reset(qr)
job = execute(qc, backend=backend, shots=1024)
result = job.result()
print(result)
print('theta_1:' + str(theta_1))
print('theta_2:' + str(theta_2))
# print( result.get_data(qc))
plot_histogram(result.get_counts())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this section, we first judge the version of Python and import the packages of qiskit, math to implement the following code. We show our algorithm on the ibm_qasm_simulator; if you need to run it on a real quantum computer, please remove the "#" in front of "import Qconfig".
Step2: Here we define the number pi in the math lib, because we need to use the u3 gate. And we also define a list of the parameter theta which we need to use in the u3 gate. Same as above, if you want to run on a real quantum computer, please remove the symbol "#" and configure your local Qconfig.py file.
|
14,267
|
<ASSISTANT_TASK:>
Python Code:
truth = "This is some text.\nMore text, but on a different line!\nInsert your favorite meme here.\n"
pred = read_file_contents("q1data/file1.txt")
assert truth == pred
retval = -1
try:
retval = read_file_contents("nonexistent/path.txt")
except:
assert False
else:
assert retval is None
truth = "Yo dawg, I heard yo and yo dawg like yo-yos.\nSo we put yo dawg in a yo-yo.\nSo yo can yo-yo yo dawg while yo dawg yo-yos, dawg.\nMaximum ridiculousness reached.\n"
pred = read_file("q1data/file2.txt")
assert truth == pred
truth = ['Yo dawg, I heard yo and yo dawg like yo-yos.\n',
'So we put yo dawg in a yo-yo.\n',
'So yo can yo-yo yo dawg while yo dawg yo-yos, dawg.\n',
'Maximum ridiculousness reached.\n']
pred = read_file("q1data/file2.txt", as_list = True)
for item in truth:
assert item in pred
for item in pred:
assert item in truth
retval = -1
try:
retval = read_file("another/nonexistent/path.txt")
except:
assert False
else:
assert retval is None
import os.path
assert count_lines("q1data/file1.txt", "q1data/file1_out.txt")
assert os.path.exists("q1data/file1_out.txt")
assert int(open("q1data/file1_out.txt", "r").read()) == 3
r1 = None
try:
r1 = count_lines("yet/another/nonexistent/path.txt", "meaningless")
except:
assert False
else:
assert not r1
r2 = None
try:
r2 = count_lines("q1data/file1.txt", "/this/should/throw/an/error.txt")
except:
assert False
else:
assert not r2
if os.path.exists("q1data/out_again.txt"):
os.remove("q1data/out_again.txt")
assert acount_lines("q1data/file1.txt", "q1data/out_again.txt")
assert os.path.exists("q1data/out_again.txt")
assert int(open("q1data/out_again.txt", "r").read()) == 3
assert acount_lines("q1data/file2.txt", "q1data/out_again.txt")
assert os.path.exists("q1data/out_again.txt")
assert int("".join(open("q1data/out_again.txt", "r").read().split("\n"))) == 34
r1 = None
try:
r1 = acount_lines("yet/another/nonexistent/path.txt", "meaningless")
except:
assert False
else:
assert not r1
r2 = None
try:
r2 = acount_lines("q1data/file2.txt", "/this/should/throw/an/error.txt")
except:
assert False
else:
assert not r2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: B
Step2: C
Step3: D
|
14,268
|
<ASSISTANT_TASK:>
Python Code:
!pip install --upgrade pymongo
from pprint import pprint as pp
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
matplotlib.style.use('ggplot')
%%bash
sudo apt-get update
sudo apt-get install -y mongodb-clients
%%bash
cat << END | mongo --host mongo_mongocfg1_1
rs.initiate(); // Iniciar el replica set de los servidores de configuración
sleep(1000);
rs.status()
END
%%bash
cat <<END | mongo --host mongo_mongors1_1
rs.initiate(); // Inicio del RS
sleep(1000);
cfg = rs.conf();
cfg.members[0].host = "mongo_mongors1_1"; // Cambiar el host porque docker coge el hostname del contenedor
rs.reconfig(cfg); // Reconfigurar
//rs.add(host2/3) // Añadir posibles nuevos hosts añadidos al RS
rs.status()
END
%%bash
cat <<END | mongo --host mongo_mongors2_1
rs.initiate();
sleep(1000);
cfg = rs.conf();
cfg.members[0].host = "mongo_mongors2_1";
rs.reconfig(cfg);
//rs.add(host2/3)
rs.status()
END
import pymongo
from pymongo import MongoClient
client = MongoClient("mongo_mongors1_1",27017)
client
client.database_names()
%%bash
file=../Posts.csv
test -e $file || wget http://neuromancer.inf.um.es:8080/es.stackoverflow/`basename ${file}`.gz -O - 2>/dev/null | gunzip > $file
db = client.stackoverflow
db = client['stackoverflow']
db
posts = db.posts
posts
%%bash
mongoimport --db stackoverflow --collection posts --drop --type csv \
--headerline --host=mongo_mongors1_1 --file ../Posts.csv
posts.count()
posts.create_index('Id')
%%bash
cat <<END | mongo --host mongo_mongos_1
sh.addShard("mongors1/mongo_mongors1_1:27017");
sh.addShard("mongors2/mongo_mongors2_1:27017");
sh.enableSharding("stackoverflow");
sh.shardCollection('stackoverflow.posts', {'Id': 1})
END
%%bash
cat <<END | mongo --host mongo_mongos_1
sh.status()
END
post = posts.find_one()
post
posts.find({"PostTypeId": 2}).explain()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Usaremos la librería pymongo para python. La cargamos a continuación.
Step2: La conexión se inicia con MongoClient en el host descrito en el fichero docker-compose.yml (mongo).
Step3: Importación de los ficheros CSV. Por ahora creamos una colección diferente para cada uno. Después estudiaremos cómo poder optimizar el acceso usando agregación.
Step4: Creamos el índice por el que se creará el shard. Por defecto es de tipo ASCENDING.
Step5: Añadimos al shard ambos ordenadores de ambos replica sets, y activamos el sharding en la tabla posts.
Step6:
|
14,269
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
dta = sm.datasets.macrodata.load_pandas().data
index = pd.Index(sm.tsa.datetools.dates_from_range("1959Q1", "2009Q3"))
print(index)
dta.index = index
del dta["year"]
del dta["quarter"]
print(sm.datasets.macrodata.NOTE)
print(dta.head(10))
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
dta.realgdp.plot(ax=ax)
legend = ax.legend(loc="upper left")
legend.prop.set_size(20)
gdp_cycle, gdp_trend = sm.tsa.filters.hpfilter(dta.realgdp)
gdp_decomp = dta[["realgdp"]].copy()
gdp_decomp["cycle"] = gdp_cycle
gdp_decomp["trend"] = gdp_trend
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
gdp_decomp[["realgdp", "trend"]]["2000-03-31":].plot(ax=ax, fontsize=16)
legend = ax.get_legend()
legend.prop.set_size(20)
bk_cycles = sm.tsa.filters.bkfilter(dta[["infl", "unemp"]])
fig = plt.figure(figsize=(12, 10))
ax = fig.add_subplot(111)
bk_cycles.plot(ax=ax, style=["r--", "b-"])
print(sm.tsa.stattools.adfuller(dta["unemp"])[:3])
print(sm.tsa.stattools.adfuller(dta["infl"])[:3])
cf_cycles, cf_trend = sm.tsa.filters.cffilter(dta[["infl", "unemp"]])
print(cf_cycles.head(10))
fig = plt.figure(figsize=(14, 10))
ax = fig.add_subplot(111)
cf_cycles.plot(ax=ax, style=["r--", "b-"])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hodrick-Prescott Filter
Step2: Baxter-King approximate band-pass filter
Step3: We lose K observations on both ends. It is suggested to use K=12 for quarterly data.
Step4: Christiano-Fitzgerald approximate band-pass filter
|
14,270
|
<ASSISTANT_TASK:>
Python Code:
# Make a dictionary with {} and : to signify a key and a value
my_dict = {'key1':'value1','key2':'value2'}
# Call values by their key
my_dict['key2']
my_dict = {'key1':123,'key2':[12,23,33],'key3':['item0','item1','item2']}
#Lets call items from the dictionary
my_dict['key3']
# Can call an index on that value
my_dict['key3'][0]
#Can then even call methods on that value
my_dict['key3'][0].upper()
my_dict['key1']
# Subtract 123 from the value
my_dict['key1'] = my_dict['key1'] - 123
#Check
my_dict['key1']
# Set the object equal to itself minus 123
my_dict['key1'] -= 123
my_dict['key1']
# Create a new dictionary
d = {}
# Create a new key through assignment
d['animal'] = 'Dog'
# Can do this with any object
d['answer'] = 42
#Show
d
# Dictionary nested inside a dictionary nested in side a dictionary
d = {'key1':{'nestkey':{'subnestkey':'value'}}}
# Keep calling the keys
d['key1']['nestkey']['subnestkey']
# Create a typical dictionary
d = {'key1':1,'key2':2,'key3':3}
# Method to return a list of all keys
d.keys()
# Method to grab all values
d.values()
# Method to return tuples of all items (we'll learn about tuples soon)
d.items()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Its important to note that dictionaries are very flexible in the data types they can hold. For example
Step2: We can effect the values of a key as well. For instance
Step3: A quick note, Python has a built-in method of doing a self subtraction or addition (or multiplication or division). We could have also used += or -= for the above statement. For example
Step4: We can also create keys by assignment. For instance if we started off with an empty dictionary, we could continually add to it
Step5: Nesting with Dictionaries
Step6: Wow! Thats a quite the inception of dictionaries! Let's see how we can grab that value
Step7: A few Dictionary Methods
|
14,271
|
<ASSISTANT_TASK:>
Python Code:
import sys, os
from adaptivemd import Project, Event, FunctionalEvent, Trajectory
project = Project('tutorial')
print project.tasks
print project.trajectories
print project.models
engine = project.generators['openmm']
modeller = project.generators['pyemma']
pdb_file = project.files['initial_pdb']
print pdb_file.get_file()[:1000] + ' [...]'
file_name = next(project.traj_name) # get a unique new filename
trajectory = Trajectory(
location=file_name, # this creates a new filename
frame=pdb_file, # initial frame is the PDB
length=100, # length is 100 frames
engine=engine # the engine to be used
)
trajectory = project.new_trajectory(
frame=pdb_file,
length=100,
engine=engine,
number=1 # if more then one you get a list of trajectories
)
task_run = engine.run(trajectory)
task_extend = engine.extend(trajectory, 50)
project.queue(task_run, task_extend)
engine.native_stride
# task = trajectory.run().extend(50)
# task = trajectory.run().extend([10] * 10)
for t in project.trajectories:
print t.short, t.length
for f in project.files:
print f
from adaptivemd import DT
for t in project.files.c(Trajectory):
print t.short, t.length,
if t.created:
if t.created > 0:
print 'created @ %s' % DT(t.created)
else:
print 'modified @ %s' % DT(-t.created)
else:
print 'not existent'
trajectory = project.new_trajectory(engine['system_file'], 100)
task = engine.run(trajectory)
project.queue(task)
task.state
print task.stdout
print task.stderr
# project.queue(project.new_trajectory(pdb_file, 100, engine).run()) can be called as
project.queue(project.new_trajectory(pdb_file, 100, engine))
trajectory = project.trajectories.one
frame = trajectory[28]
print frame, frame.exists
frame = trajectory[30]
print frame, frame.exists
frame = trajectory[28]
task = project.new_trajectory(frame, 100, engine).run()
print task
frame = trajectory[30]
task = project.new_trajectory(frame, 100, engine).run()
print task
print task.description
project.queue(task)
project.wait_until(task.is_done)
task.state
from adaptivemd.analysis.pyemma import PyEMMAAnalysis
modeller = PyEMMAAnalysis(
engine=engine,
outtype='protein',
features={'add_inverse_distances': {'select_Backbone': None}}
).named('pyemma')
task = modeller.execute(list(project.trajectories))
project.queue(task)
project.wait_until(task.is_done)
for m in project.models:
print m
model = project.models.last
print model['msm']['P']
project.find_ml_next_frame(4)
trajectories = project.new_ml_trajectory(length=100, number=4, engine=engine)
trajectories
project.queue(trajectories)
project.trigger()
for w in project.workers:
if w.state == 'running':
print '[%s:%s] %s:%s' % (w.state, DT(w.seen).time, w.hostname, w.cwd)
# project.workers.last.command = 'shutdown'
project.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's open our test project by its name. If you completed the previous example this should all work out of the box.
Step2: Open all connections to the MongoDB and Session so we can get started.
Step3: Now restore our old ways to generate tasks by loading the previously used generators.
Step4: Remember that we stored some files in the database and of course you can look at them again, should that be important.
Step5: The Trajectory object
Step6: Since this is tedious to write there is a shortcut
Step7: Like in the first example, now that we have the parameters of the Trajectory we can create the task to do that.
Step8: This was easy, but we can do some interesting stuff. Since we know the trajectory will exist now we can also extend by some frames. Remember, the trajectory does not really exist yet (not until we submit it and a worker executes it), but we can pretend that it does, since its relevant properties are set.
Step9: The only problem is to make sure the tasks are run in the correct order. This would not be a problem if the worker ran tasks in the order they are placed in the queue, but that defeats the purpose of parallel runs. Therefore an extended task knows that it depends on the existence of the source trajectory. The worker will hence only run a trajectory once the source exists.
Step10: A not on simulation length
Step11: simpler function calls
Step12: This will create two tasks that first runs the trajectory and then extend it by 50 frames (in native engine frames)
Step13: This will create 10 tasks that each extend the previous one. Each of the tasks requires the previous one to finish; this way the dependency is preserved. You can use this to mimic using several restarts in between, and it also means that you have no idea which worker will actually start and which worker will continue or finish a trajectory.
Step14: If this works, then you should see one 100 frame trajectory from the setup (first example) and a second 150 length trajectory that we just generated by running 100 frames and extending it by another 50.
Step15: Now all files filtered by [c]lass Trajectory. DT is a little helper to convert time stamps into something readable.
Step16: You see that the extended trajectory appears twice: once with length 100 and once with length 150. This is correct, because the idea of a 100 frame trajectory was used and hence is saved. But why does this one not appear in the list of trajectories? It was created first and had a timestamp of creation written to .created. This is the time when the worker finished and was successful.
Step17: Well, nothing changed obviously and we expect it to fail. So let's inspect what happened.
Step18: You might need to execute this cell several times. It will first become queued, then running and finally fail and stop there.
Step19: We see, what we expect. In openmmrun.py the openmm executable it could not load the pdb file.
Step20: Trajectories from other trajectories
Step21: Good, at least 100 frames. We pick, say, frame at index 28 (which is the 29th frame, we start counting at zero) using the way you pick an element from a python list (which is almost what a Trajectory represents, a list of frames)
Step22: This part is important! We are running only one full atom trajectory with stride larger than one, so if we want to pick a frame from this trajectory you can pick in theory every frame, but only some of these really exist. If you want to restart from a frame this needs to be the case. Otherwise you run into trouble.
Step23: See, how the actual frame picked in the mdconvert line is -i 3 meaning index 3 which represents frame 30 with stride 10.
Step24: Btw, you can wait until something happens using project.wait_until(condition). This is not so useful in notebooks, but in scripts it does. condition here is a function that evaluates to True or False. it will be tested in regular intervals and once it is True the function returns.
Step25: Each Task has a function is_done that you can use. It will return once a task is done. That means it either failed or succeeded or was cancelled. Basically when it is not queued anymore.
Step26: The instance to compute an MSM model of existing trajectories that you pass it. It is initialized with a .pdb file that is used to create features between the $c_\alpha$ atoms. This implementation requires a PDB, but in general this is not necessary. It is specific to my PyEMMAAnalysis showcase.
Step27: Again we name it pyemma for later reference.
Step28: So we generated one model. The Model objects contain (in the base version) only a .data attribute which is a dictionary of information about the generated model.
Step29: Pick frames automatically
Step30: So you can pick states according to the newest (last) model. (This will be moved to the Brain). And since we want trajectories with these frames as starting points there is also a function for that
Step31: Let's submit these before we finish this notebook with a quick discussion of workers
Step32: That's it.
Step33: Okay, the worker is running, was last reporting its heartbeat at ... and has a hostname and current working directory (where it was executed from). The generators specify which tasks from some generators are executed. If it is None then the worker runs all tasks it finds. You can use this to run specific workers for models and some for trajectory generation.
Step34: Afterwards you need to restart you worker to continue with this examples.
|
14,272
|
<ASSISTANT_TASK:>
Python Code:
# Sydney housing market exploration: pandas pivot tables, then a grouped
# bar chart via plotly.  Notebook-style script: bare expressions are
# displayed cell outputs.
import pandas as pd
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
# Tab-separated source data (columns include type, council, suburb,
# distance_to_CBD, sold, value).
df = pd.read_csv('./asset/sydney_housing_market.txt', sep='\t')
df.head()
# Default pivot aggregation is np.mean over every numeric column.
pd.pivot_table(df, index=['type'])
# Per-column aggregation functions via a dict.
pd.pivot_table(df, index=['type'], aggfunc={'distance_to_CBD':np.mean, 'sold':np.sum})
# 'value' arrives as a '$1,234'-style string; strip '$' and ',' and
# convert to float so it participates in numeric aggregation.
df['value']=df['value'].replace('[\$,]','',regex=True).astype(float)
pd.pivot_table(df, index=['type'])
# Hierarchical index: type within council.
pd.pivot_table(df, index=['type','council'])
# columns= segments the data a second way (NaN where no data exists).
pd.pivot_table(df, index=['council'], columns=['type'])
pd.pivot_table(df, index=['type'],aggfunc=np.sum)
# margins=True appends an 'All' row/column of grand totals.
pd.pivot_table(df, index=['type','council'], aggfunc=np.sum, margins=True)
# Mixed aggregation: sum of units sold, mean of value; distance omitted.
pd.pivot_table(df, index=['council','suburb'],
               columns=['type'],
               values=['sold', 'value'],
               aggfunc={'sold':np.sum, 'value':np.mean},
               margins=True)
table = pd.pivot_table(df, index=['council'], columns=['type'], values=['sold'], aggfunc=np.sum, margins=True)
table
# Advanced filtering over the pivot result with DataFrame.query.
table.query('council==["Randwick"]')
table.query('council==["Rockdale","Lane Cove"]')
plot_table = table[:-1] # get rid of ALL
plot_table.sold.house
table.sold.house
plot_table.index
# One bar trace per dwelling type, grouped side by side per council.
trace1 = go.Bar(
    x=plot_table.index,
    y=plot_table.sold.house,
    name='House'
)
trace2 = go.Bar(
    x=plot_table.index,
    y=plot_table.sold.unit,
    name='Unit'
)
data = [trace1, trace2]
layout = go.Layout(
    barmode='group'
)
fig = go.Figure(data=data, layout=layout)
# Uploads the figure to the plotly service (requires plotly credentials).
py.iplot(fig, filename='pandas-notebook-plot2')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import data
Step2: Pivot Table
Step3: Note that the default aggregation function is np.mean. We can specify the aggregation function in the aggfunc parameter, as shown below.
Step4: For simplicity, we will stick with the default aggregation function.
Step5: We could also choose more than one column as index
Step6: columns provide an additional way to segment the data
Step7: Note that NaN implies that there is no data here
Step8: Use margins=True to show the total numbers
Step9: We should use avg for value but sum for sold, and we do not want to see distance_to_CBD for now
Step10: Advanced Filtering over Pivot Table
Step11: We can just look at data from one city
Step12: We can also specify multiple values
Step13: Note
|
14,273
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncar', 'sandbox-3', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
14,274
|
<ASSISTANT_TASK:>
Python Code:
!which howdoi
!howdoi --help #注意我这里是在jupyter notebook里面直接使用的,所以需要加感叹号。如果是在terminal上,不需要加叹号。
!howdoi --num-answers 3 python lambda function list comprehension
!howdoi --num-answer 3 python numpy array create
!ls /Users/ywfang/FANG/git/howdoi_ywfang/howdoi
!sed -n '70,120p' /Users/ywfang/FANG/git/howdoi_ywfang/howdoi/howdoi.py
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 通过帮助文档,我们可以了解到HowDoI大概的工作模式以及它的一些功能,例如可以colorize the output,get multiple answers,
Step2: Read HowDoI's code
Step3: 通过浏览howdoi.py,我们发现这里面定义了很多新的函数,而且每个函数都会在之后的函数中被引用,这是的我们可以方便follow。
|
14,275
|
<ASSISTANT_TASK:>
Python Code:
from array import array
import reprlib
import math
import numbers
import functools
import operator
import itertools
class Vector:
    """A multidimensional vector backed by a compact array of doubles.

    Supports iteration, len(), slicing (which returns a new Vector),
    read-only shortcut attributes x/y/z/t for the first four components,
    hashing, bytes round-tripping via frombytes(), and a custom format
    spec where a trailing 'h' selects hyperspherical coordinates.
    """
    typecode = 'd'  # array typecode: C double

    def __init__(self, components):
        # Store components in an array rather than a list to save memory.
        self._components = array(self.typecode, components)

    def __iter__(self):
        return iter(self._components)

    def __repr__(self):
        # reprlib.repr limits the output length for very long vectors.
        components = reprlib.repr(self._components)
        components = components[components.find('['):-1]
        return 'Vector({})'.format(components)

    def __str__(self):
        return str(tuple(self))

    def __bytes__(self):
        return (bytes([ord(self.typecode)]) +
                bytes(self._components))

    def __eq__(self, other):
        # Compare lengths first to short-circuit, then compare pairwise
        # without building intermediate tuples.
        return len(self) == len(other) and all(a == b for a, b in zip(self, other))

    def __abs__(self):
        return math.sqrt(sum(x * x for x in self))

    def __bool__(self):
        return bool(abs(self))

    @classmethod
    def frombytes(cls, octets):
        """Alternate constructor: rebuild a Vector from bytes(v)."""
        typecode = chr(octets[0])
        memv = memoryview(octets[1:]).cast(typecode)
        return cls(memv)

    def __len__(self):
        return len(self._components)

    def __getitem__(self, index):
        cls = type(self)  # the class of the instance, for subclass support
        if isinstance(index, slice):
            # Slicing returns a new Vector, not a bare array.
            return cls(self._components[index])
        elif isinstance(index, numbers.Integral):
            return self._components[index]
        else:
            msg = '{cls.__name__} indices must be integers'
            raise TypeError(msg.format(cls=cls))

    shortcut_names = 'xyzt'  # single-letter aliases for components 0..3

    def __getattr__(self, name):
        """Expose the first four components as v.x, v.y, v.z, v.t."""
        cls = type(self)
        if len(name) == 1:
            pos = cls.shortcut_names.find(name)
            if 0 <= pos < len(self._components):
                return self._components[pos]
        msg = '{.__name__!r} object has no attribute {!r}'
        raise AttributeError(msg.format(cls, name))

    def __setattr__(self, name, value):
        """Reject assignment to single lowercase letters so the shortcut
        attributes stay read-only and don't shadow __getattr__."""
        cls = type(self)
        if len(name) == 1:
            if name in cls.shortcut_names:
                # BUG FIX: original template was '{attr_name!}', an
                # invalid conversion spec that made .format() raise
                # ValueError instead of producing the error message.
                error = 'readonly attribute {attr_name!r}'
            elif name.islower():
                error = "can't set attributes 'a' to 'z' in {cls_name!r}"
            else:
                error = ''  # other single-char names fall through
            if error:
                msg = error.format(cls_name=cls.__name__, attr_name=name)
                raise AttributeError(msg)
        super().__setattr__(name, value)  # default behavior otherwise

    def __hash__(self):
        # XOR-fold the component hashes; generator avoids a temp list.
        # Initializer 0 makes the empty Vector hashable (reduce with no
        # initializer raises TypeError on an empty iterable).
        hashes = (hash(x) for x in self._components)
        return functools.reduce(operator.xor, hashes, 0)

    def angle(self, n):
        """Angular coordinate n of the hyperspherical representation."""
        r = math.sqrt(sum(x * x for x in self[n:]))
        a = math.atan2(r, self[n - 1])
        if (n == len(self) - 1) and (self[-1] < 0):
            return math.pi * 2 - a
        else:
            return a

    def angles(self):
        """All angular coordinates, computed lazily."""
        return (self.angle(n) for n in range(1, len(self)))

    def __format__(self, fmt_spec=''):
        if fmt_spec.endswith('h'):
            # 'h' suffix: hyperspherical coordinates <r, phi1, phi2, ...>;
            # chain() iterates the magnitude and angles seamlessly.
            fmt_spec = fmt_spec[:-1]
            coords = itertools.chain([abs(self)], self.angles())
            outer_fmt = '<{}>'
        else:
            coords = self
            outer_fmt = '({})'  # Cartesian coordinates
        components = (format(c, fmt_spec) for c in coords)
        return outer_fmt.format(', '.join(components))

    def __neg__(self):
        return Vector(-x for x in self)

    def __pos__(self):
        return Vector(self)
import decimal
ctx = decimal.getcontext()
ctx.prec = 40 # 精度设为 40
one_third = decimal.Decimal('1') / decimal.Decimal('3')
one_third
one_third == +one_third
ctx.prec = 28 #精度设为 28
one_third == +one_third
+one_third
from collections import Counter
ct = Counter('abracadabra')
ct
ct['r'] = -3
ct['d'] = 0
ct
+ct
from array import array
import reprlib
import math
import numbers
import functools
import operator
import itertools
class Vector:
    """A multidimensional vector backed by a compact array of doubles.

    Same protocol as the previous version, plus element-wise addition:
    `v + other` zips the operands, padding the shorter one with 0.0.
    """
    typecode = 'd'  # array typecode: C double

    def __init__(self, components):
        # Store components in an array rather than a list to save memory.
        self._components = array(self.typecode, components)

    def __iter__(self):
        return iter(self._components)

    def __repr__(self):
        # reprlib.repr limits the output length for very long vectors.
        components = reprlib.repr(self._components)
        components = components[components.find('['):-1]
        return 'Vector({})'.format(components)

    def __str__(self):
        return str(tuple(self))

    def __bytes__(self):
        return (bytes([ord(self.typecode)]) +
                bytes(self._components))

    def __eq__(self, other):
        # Length check short-circuits; then compare pairwise lazily.
        return len(self) == len(other) and all(a == b for a, b in zip(self, other))

    def __abs__(self):
        return math.sqrt(sum(x * x for x in self))

    def __bool__(self):
        return bool(abs(self))

    @classmethod
    def frombytes(cls, octets):
        """Alternate constructor: rebuild a Vector from bytes(v)."""
        typecode = chr(octets[0])
        memv = memoryview(octets[1:]).cast(typecode)
        return cls(memv)

    def __len__(self):
        return len(self._components)

    def __getitem__(self, index):
        cls = type(self)  # the class of the instance, for subclass support
        if isinstance(index, slice):
            # Slicing returns a new Vector, not a bare array.
            return cls(self._components[index])
        elif isinstance(index, numbers.Integral):
            return self._components[index]
        else:
            msg = '{cls.__name__} indices must be integers'
            raise TypeError(msg.format(cls=cls))

    shortcut_names = 'xyzt'  # single-letter aliases for components 0..3

    def __getattr__(self, name):
        """Expose the first four components as v.x, v.y, v.z, v.t."""
        cls = type(self)
        if len(name) == 1:
            pos = cls.shortcut_names.find(name)
            if 0 <= pos < len(self._components):
                return self._components[pos]
        msg = '{.__name__!r} object has no attribute {!r}'
        raise AttributeError(msg.format(cls, name))

    def __setattr__(self, name, value):
        """Reject assignment to single lowercase letters so the shortcut
        attributes stay read-only and don't shadow __getattr__."""
        cls = type(self)
        if len(name) == 1:
            if name in cls.shortcut_names:
                # BUG FIX: original template was '{attr_name!}', an
                # invalid conversion spec that made .format() raise
                # ValueError instead of producing the error message.
                error = 'readonly attribute {attr_name!r}'
            elif name.islower():
                error = "can't set attributes 'a' to 'z' in {cls_name!r}"
            else:
                error = ''  # other single-char names fall through
            if error:
                msg = error.format(cls_name=cls.__name__, attr_name=name)
                raise AttributeError(msg)
        super().__setattr__(name, value)  # default behavior otherwise

    def __hash__(self):
        # XOR-fold the component hashes; initializer 0 keeps the empty
        # Vector hashable (bare reduce on empty input raises TypeError).
        hashes = (hash(x) for x in self._components)
        return functools.reduce(operator.xor, hashes, 0)

    def angle(self, n):
        """Angular coordinate n of the hyperspherical representation."""
        r = math.sqrt(sum(x * x for x in self[n:]))
        a = math.atan2(r, self[n - 1])
        if (n == len(self) - 1) and (self[-1] < 0):
            return math.pi * 2 - a
        else:
            return a

    def angles(self):
        """All angular coordinates, computed lazily."""
        return (self.angle(n) for n in range(1, len(self)))

    def __format__(self, fmt_spec=''):
        if fmt_spec.endswith('h'):
            # 'h' suffix: hyperspherical coordinates <r, phi1, phi2, ...>.
            fmt_spec = fmt_spec[:-1]
            coords = itertools.chain([abs(self)], self.angles())
            outer_fmt = '<{}>'
        else:
            coords = self
            outer_fmt = '({})'  # Cartesian coordinates
        components = (format(c, fmt_spec) for c in coords)
        return outer_fmt.format(', '.join(components))

    def __neg__(self):
        return Vector(-x for x in self)

    def __pos__(self):
        return Vector(self)

    def __add__(self, other):
        """Element-wise addition with any iterable of numbers; the
        shorter operand is padded with 0.0 via zip_longest."""
        pairs = itertools.zip_longest(self, other, fillvalue=0.0)
        return Vector(a + b for a, b in pairs)
v1 = Vector([3, 4, 5])
v1 + [10, 20, 30]
v2 = Vector([1, 2])
v1 + v2
v1 = Vector([3, 4, 5])
(10, 20, 30) + v1
def __radd__(self, other):
return self + other
v1 + 1
v1 + 'ABC'
from array import array
import reprlib
import math
import numbers
import functools
import operator
import itertools
class Vector:
typecode = 'd'
def __init__(self, components):
self._components = array(self.typecode, components)
def __iter__(self):
return iter(self._components)
def __repr__(self):
components = reprlib.repr(self._components)
components = components[components.find('['):-1]
return 'Vector({})'.format(components)
def __str__(self):
return str(tuple(self))
def __bytes__(self):
return (bytes([ord(self.typecode)]) +
bytes(self._components))
def __eq__(self, other):
return len(self) == len(other) and all(a == b for a, b in zip(self, other))
def __abs__(self):
return math.sqrt(sum(x * x for x in self))
def __bool__(self):
return bool(abs(self))
@classmethod
def frombytes(cls, octets):
typecode = chr(octets[0])
memv = memoryview(octets[1:]).cast(typecode)
return cls(memv)
# 上面都一样
def __len__(self):
return len(self._components)
def __getitem__(self, index):
cls = type(self) # 获取实例所属的类
if isinstance(index, slice):
return cls(self._components[index])
elif isinstance(index, numbers.Integral): # index 是 int 或其他整数类型
return self._components[index]
else:
msg = '{cls.__name__} indices must be integers'
raise TypeError(msg.format(cls=cls))
shortcut_names = 'xyzt'
def __getattr__(self, name):
cls = type(self)
if len(name) == 1:
pos = cls.shortcut_names.find(name)
if 0 <= pos < len(self._components):
return self._components[pos]
msg = '{.__name__!r} object has no attribute {!r}'
raise AttributeError(msg.format(cls, name))
def __setattr__(self, name, value):
cls = type(self)
if len(name) == 1:
if name in cls.shortcut_names:
error = 'readonly attribute {attr_name!}'
elif name.islower():
error = "can't set attributes 'a' to 'z' in {cls_name!r}"
else:
error = ''
if error:
msg = error.format(cls_name = cls.__name__, attr_name = name) # 这个方法好,无论错误是哪个,都可以给定值
raise AttributeError(msg)
super().__setattr__(name, value) # 默认情况,调用超类的 __setattr__ 方法,提供标准行为
def __hash__(self):
hashs = (hash(x) for x in self._components) # 注意这里是生成器表达式,不是列表推导式,可以节省内存
return functools.reduce(operator.xor, hashs)
def angle(self, n):
r = math.sqrt(sum(x * x for x in self[n:]))
a = math.atan2(r, self[n-1])
if (n == len(self) - 1) and (self[-1] < 0):
return math.pi * 2 - a
else:
return a
def angles(self):
return (self.angle(n) for n in range(1, len(self)))
def __format__(self, fmt_spec=''):
if fmt_spec.endswith('h'):
fmt_spec = fmt_spec[:-1]
coords = itertools.chain([abs(self)], # 使用 chain 函数生成生成器表达式,无缝迭代向量的模和各个角坐标
self.angles())
outer_fmt = '<{}>' # 球面坐标
else:
coords = self
outer_fmt = '({})' # 笛卡尔坐标
components = (format(c, fmt_spec) for c in coords)
return outer_fmt.format(', '.join(components))
def __neg__(self):
return Vector(-x for x in self)
def __pos__(self):
return Vector(self)
def __add__(self, other):
try:
pairs = itertools.zip_longest(self, other, fillvalue=0.0)
return Vector(a + b for a, b in pairs)
except TypeError:
return NotImplemented
def __radd__(self, other):
return self + other
def __mul__(self, scalar):
return Vector(n * scalar for n in self)
def __rmul__(self, scalar):
return self * scalar
from array import array
import reprlib
import math
import numbers
import functools
import operator
import itertools
class Vector:
typecode = 'd'
def __init__(self, components):
self._components = array(self.typecode, components)
def __iter__(self):
return iter(self._components)
def __repr__(self):
components = reprlib.repr(self._components)
components = components[components.find('['):-1]
return 'Vector({})'.format(components)
def __str__(self):
return str(tuple(self))
def __bytes__(self):
return (bytes([ord(self.typecode)]) +
bytes(self._components))
def __eq__(self, other):
return len(self) == len(other) and all(a == b for a, b in zip(self, other))
def __abs__(self):
return math.sqrt(sum(x * x for x in self))
def __bool__(self):
return bool(abs(self))
@classmethod
def frombytes(cls, octets):
typecode = chr(octets[0])
memv = memoryview(octets[1:]).cast(typecode)
return cls(memv)
# 上面都一样
def __len__(self):
return len(self._components)
def __getitem__(self, index):
cls = type(self) # 获取实例所属的类
if isinstance(index, slice):
return cls(self._components[index])
elif isinstance(index, numbers.Integral): # index 是 int 或其他整数类型
return self._components[index]
else:
msg = '{cls.__name__} indices must be integers'
raise TypeError(msg.format(cls=cls))
shortcut_names = 'xyzt'
def __getattr__(self, name):
cls = type(self)
if len(name) == 1:
pos = cls.shortcut_names.find(name)
if 0 <= pos < len(self._components):
return self._components[pos]
msg = '{.__name__!r} object has no attribute {!r}'
raise AttributeError(msg.format(cls, name))
def __setattr__(self, name, value):
cls = type(self)
if len(name) == 1:
if name in cls.shortcut_names:
error = 'readonly attribute {attr_name!}'
elif name.islower():
error = "can't set attributes 'a' to 'z' in {cls_name!r}"
else:
error = ''
if error:
msg = error.format(cls_name = cls.__name__, attr_name = name) # 这个方法好,无论错误是哪个,都可以给定值
raise AttributeError(msg)
super().__setattr__(name, value) # 默认情况,调用超类的 __setattr__ 方法,提供标准行为
def __hash__(self):
hashs = (hash(x) for x in self._components) # 注意这里是生成器表达式,不是列表推导式,可以节省内存
return functools.reduce(operator.xor, hashs)
def angle(self, n):
r = math.sqrt(sum(x * x for x in self[n:]))
a = math.atan2(r, self[n-1])
if (n == len(self) - 1) and (self[-1] < 0):
return math.pi * 2 - a
else:
return a
def angles(self):
return (self.angle(n) for n in range(1, len(self)))
def __format__(self, fmt_spec=''):
if fmt_spec.endswith('h'):
fmt_spec = fmt_spec[:-1]
coords = itertools.chain([abs(self)], # 使用 chain 函数生成生成器表达式,无缝迭代向量的模和各个角坐标
self.angles())
outer_fmt = '<{}>' # 球面坐标
else:
coords = self
outer_fmt = '({})' # 笛卡尔坐标
components = (format(c, fmt_spec) for c in coords)
return outer_fmt.format(', '.join(components))
def __neg__(self):
return Vector(-x for x in self)
def __pos__(self):
return Vector(self)
def __add__(self, other):
try:
pairs = itertools.zip_longest(self, other, fillvalue=0.0)
return Vector(a + b for a, b in pairs)
except TypeError:
return NotImplemented
def __radd__(self, other):
return self + other
def __mul__(self, scalar):
if isinstance(scalar, numbers.Real):
return Vector(n * scalar for n in self)
else:
return NotImplemented
def __rmul__(self, scalar):
return self * scalar
v1 = Vector([1.0, 2.0, 3.0])
14 * v1
v1 * True
from fractions import Fraction
v1 * Fraction(1, 3)
va = Vector([1.0, 2.0, 3.0])
vb = Vector(range(1, 4))
va == vb
t3 = (1, 2, 3)
va == t3
from array import array
import reprlib
import math
import numbers
import functools
import operator
import itertools
class Vector:
    """An n-dimensional vector of floats backed by an array('d').

    Supports iteration, slicing, hashing, formatting (Cartesian or
    spherical with the 'h' suffix), +, scalar *, and ==.
    """
    typecode = 'd'

    def __init__(self, components):
        self._components = array(self.typecode, components)

    def __iter__(self):
        return iter(self._components)

    def __repr__(self):
        # reprlib.repr truncates very long component lists with '...'
        components = reprlib.repr(self._components)
        components = components[components.find('['):-1]
        return 'Vector({})'.format(components)

    def __str__(self):
        return str(tuple(self))

    def __bytes__(self):
        return (bytes([ord(self.typecode)]) +
                bytes(self._components))

    def __abs__(self):
        return math.sqrt(sum(x * x for x in self))

    def __bool__(self):
        return bool(abs(self))

    @classmethod
    def frombytes(cls, octets):
        """Alternate constructor: rebuild a Vector from bytes()."""
        typecode = chr(octets[0])
        memv = memoryview(octets[1:]).cast(typecode)
        return cls(memv)

    def __len__(self):
        return len(self._components)

    def __getitem__(self, index):
        cls = type(self)  # the class of this instance (works for subclasses too)
        if isinstance(index, slice):
            return cls(self._components[index])  # slicing yields a new Vector
        elif isinstance(index, numbers.Integral):  # int or any other integer type
            return self._components[index]
        else:
            msg = '{cls.__name__} indices must be integers'
            raise TypeError(msg.format(cls=cls))

    # single-letter read-only shortcuts: v.x, v.y, v.z, v.t
    shortcut_names = 'xyzt'

    def __getattr__(self, name):
        cls = type(self)
        if len(name) == 1:
            pos = cls.shortcut_names.find(name)
            if 0 <= pos < len(self._components):
                return self._components[pos]
        msg = '{.__name__!r} object has no attribute {!r}'
        raise AttributeError(msg.format(cls, name))

    def __setattr__(self, name, value):
        cls = type(self)
        if len(name) == 1:
            if name in cls.shortcut_names:
                # BUG FIX: original was '{attr_name!}', a malformed format
                # string (missing conversion char) that raised ValueError
                # instead of the intended AttributeError message.
                error = 'readonly attribute {attr_name!r}'
            elif name.islower():
                error = "can't set attributes 'a' to 'z' in {cls_name!r}"
            else:
                error = ''
            if error:  # one raise site serves both error messages
                msg = error.format(cls_name=cls.__name__, attr_name=name)
                raise AttributeError(msg)
        super().__setattr__(name, value)  # default: standard attribute setting

    def __hash__(self):
        # generator expression (not a list comp) to save memory
        hashes = (hash(x) for x in self._components)
        return functools.reduce(operator.xor, hashes)

    def angle(self, n):
        """Hyperspherical angular coordinate n (1 <= n < len(self))."""
        r = math.sqrt(sum(x * x for x in self[n:]))
        a = math.atan2(r, self[n-1])
        if (n == len(self) - 1) and (self[-1] < 0):
            return math.pi * 2 - a
        else:
            return a

    def angles(self):
        """Lazily yield all angular coordinates."""
        return (self.angle(n) for n in range(1, len(self)))

    def __format__(self, fmt_spec=''):
        if fmt_spec.endswith('h'):
            # 'h' suffix: hyperspherical coordinates <r, phi1, phi2, ...>;
            # chain() iterates the magnitude then the angles seamlessly
            fmt_spec = fmt_spec[:-1]
            coords = itertools.chain([abs(self)],
                                     self.angles())
            outer_fmt = '<{}>'  # spherical coordinates
        else:
            coords = self
            outer_fmt = '({})'  # Cartesian coordinates
        components = (format(c, fmt_spec) for c in coords)
        return outer_fmt.format(', '.join(components))

    def __neg__(self):
        return Vector(-x for x in self)

    def __pos__(self):
        return Vector(self)

    def __add__(self, other):
        try:
            # shorter operand is padded with 0.0
            pairs = itertools.zip_longest(self, other, fillvalue=0.0)
            return Vector(a + b for a, b in pairs)
        except TypeError:
            # let Python try the other operand's __radd__
            return NotImplemented

    def __radd__(self, other):
        return self + other

    def __mul__(self, scalar):
        if isinstance(scalar, numbers.Real):  # int, bool, float, Fraction...
            return Vector(n * scalar for n in self)
        else:
            return NotImplemented

    def __rmul__(self, scalar):
        return self * scalar

    def __eq__(self, other):
        # only compare against other Vectors; for anything else defer to
        # the other operand (removed a leftover debug print from here)
        if isinstance(other, Vector):
            return (len(self) == len(other) and
                    all(a == b for a, b in zip(self, other)))
        else:
            return NotImplemented
t3 = (1, 2, 3)
va = Vector([1.0, 2.0, 3.0])
va == t3
va != t3
def __ne__(self, other):
eq_result = self == other
if eq_result is NotImplemented:
return NotImplemented
else:
return not eq_result
v1 = Vector([1, 2, 3])
v1_alias = v1
id(v1)
v1 += Vector([4, 5, 6])
v1
id(v1)
v1_alias
v1 *= 11
v1
id(v1)
import abc
class Tombola(abc.ABC):
    """ABC for a tombola: load items in, pick random items out."""
    @abc.abstractmethod
    def load(self, iterable):
        '''Add items from an iterable.'''
    @abc.abstractmethod # abstract methods are marked with this decorator
    def pick(self):
        '''Remove one item at random and return it.
        This method should raise LookupError when the instance is empty.
        '''
    def loaded(self):
        '''Return True if there is at least one item, False otherwise.'''
        return bool(self.inspect()) # a concrete method in an ABC may rely only on the interface the ABC itself defines (its other concrete methods, abstract methods, or properties)
    def inspect(self):
        '''Return a sorted tuple of the items currently held.'''
        items = []
        while 1: # we can't know how a subclass stores items, so empty the Tombola by calling pick() until it raises
            try:
                items.append(self.pick())
            except LookupError:
                break
        self.load(items) # put the items back
        return tuple(sorted(items))
import random
class BingoCage(Tombola):
    """Concrete Tombola that pops items in random order."""

    def __init__(self, items):
        # SystemRandom draws from os.urandom — good when fairness matters
        self._randomizer = random.SystemRandom()
        self._items = []
        self.load(items)

    def load(self, items):
        """Add items and reshuffle the whole cage."""
        self._items.extend(items)
        self._randomizer.shuffle(self._items)

    def pick(self):
        """Remove and return one random item; raise LookupError if empty."""
        try:
            return self._items.pop()
        except IndexError:
            raise LookupError('pick from empty BingoCage')

    def __call__(self):
        """Calling the instance is a shortcut for pick().

        Bug fix: the original body was `self.pick()` without `return`,
        so the picked item was discarded and the call returned None.
        """
        return self.pick()
# ==== add
class AddableBingoCage(BingoCage):
    # Extends BingoCage with + (Tombola operands only) and += (any iterable).
    def __add__(self, other): # the second operand of + must be a Tombola instance
        if isinstance(other, Tombola): # other is a Tombola: combine the items of both
            return AddableBingoCage(self.inspect() + other.inspect())
        else:
            return NotImplemented
    def __iadd__(self, other):
        if isinstance(other, Tombola):
            other_iterable = other.inspect()
        else:
            try:
                other_iterable = iter(other) # otherwise try to build an iterator over it
            except TypeError:
                self_cls = type(self).__name__
                msg = 'right operand in += must be {!r} or an iterable'
                raise TypeError(msg.format(self_cls))
        self.load(other_iterable)
        return self # essential: augmented-assignment special methods must return self
vowels = 'AEIOU'
globe = AddableBingoCage(vowels)
globe.inspect()
globe.pick() in vowels
len(globe.inspect())
globe2 = AddableBingoCage('XYZ')
globe3 = globe + globe2
len(globe3.inspect())
void = globe + [10, 20]
globe_orig = globe
len(globe.inspect())
globe += globe2
len(globe.inspect())
globe += ['M', 'N']
len(globe.inspect())
globe is globe_orig
globe += 1
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 因为 Vector 实例是可迭代对象,而且 Vector.__init__ 的参数是可迭代对象,所以我们的 __neg__ 和 _pos__ 的实现短小精悍
Step2: 虽然每个 +one_third 表达式都会使用 one_third 的值创建一个新的 Decimal 实例,但是会使用当前算数运算符上下文的精度
Step3: 重载向量加法运算符 +
Step4: pairs 是个生成器,会生成 (a, b) 形式的元组,其中 a 来自 self, b 来自 other,如果 a 和 b 的长度不同,使用 fillvalue 填充较短的可迭代对象
Step5: zip_longest(...) 能处理任何可迭代对象,而且构建新 Vector 实例的生成器表达式仅仅是把 zip_longest(...) 生成的值对相加(a + b),因此可以使用任何生成数字元素的可迭代对象
Step6: 为了支持涉及不同类型的运算,Python 为中缀运算符特殊方法提供了特殊的分派机制,对于表达式 a + b 来说,会执行下面操作:
Step7: 前面的 Vector 对象的加法对象如果不可迭代,__add__ 就无法处理,而且提供的错误消息不是很有用
Step8: 上面揭露的问题比晦涩难懂的错误消息更严重,如果由于类型不兼容导致特殊方法无法返回有效结果,应该返回 NoteImplemented,而不是抛出 TypeError,返回 NotImplemented 时,另一个操作数所属类型还有机会执行运算,Python 会尝试调用反向方法
Step9: 重载标量乘法运算符
Step10: 这两个方法确实可用,但是提供不兼容操作数时候会出问题。scalar 参数的值要是个数字,与浮点数相乘得到的的积是另一个浮点数(因为 Vector 类内部使用的是浮点数数组)。因此,不能使用复数,但是可以是 int,bool(int 的子类),甚至是 fractions.Fraction 实例等标量。
Step11: 众多比较运算符
Step12: Vector 和 元组比较的结果可能不太理想,作者的观点是结果应该由应用上下文决定。不过,”Python 之禅“作者说: 如果存在多重可能,不要猜测
Step13: 上面首先调用 Vector.__eq__(va, t3)
Step14: __ne__ 运作方式与下面类似:
Step15: 可以看到,Python 3 中 __ne__ 对我们来说够用了,一般不用重载。
Step16: 这里的增量运算符只是语法糖,a += b 的行为和 a = a + b 一样,对于不可便类型来说,这是预期行为,而且,如果定义了 __add__ 方法的话,不用写额外的代码 += 就能使用
Step17: 最后,还有一点要注意,从设计上来看,AddableBingoCage 不用定义 __radd__ 方法,因为不需要。如果右操作数是相同类型,那么正向方法 __add__ 会处理,因此 Python 计算 a + b 时,如果 a 是 AddableBingoCage 实例,而 b 不是,那么返回 NotImplemented,这时 Python 最好放弃,抛出 TypeError,因为无法处理 b
|
14,276
|
<ASSISTANT_TASK:>
Python Code:
import yaml
# Set `PATH` to include the directory containing TFX CLI and skaffold.
PATH = %env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
!python -c "import tfx; print('TFX version: {}'.format(tfx.__version__))"
!python -c "import kfp; print('KFP version: {}'.format(kfp.__version__))"
%pip install --upgrade --user tfx==0.25.0
%pip install --upgrade --user kfp==1.0.4
%cd pipeline
!ls -la
# Use the following command to identify the GCS bucket for metadata and pipeline storage.
!gsutil ls
# TODO: Set your environment resource settings here for GCP_REGION, ARTIFACT_STORE_URI, ENDPOINT, and CUSTOM_SERVICE_ACCOUNT.
GCP_REGION = ""
ARTIFACT_STORE_URI = ""
ENDPOINT = ""
CUSTOM_SERVICE_ACCOUNT = ""
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
# Set your resource settings as environment variables. These override the default values in pipeline/config.py.
%env GCP_REGION={GCP_REGION}
%env ARTIFACT_STORE_URI={ARTIFACT_STORE_URI}
%env CUSTOM_SERVICE_ACCOUNT={CUSTOM_SERVICE_ACCOUNT}
%env PROJECT_ID={PROJECT_ID}
PIPELINE_NAME = "tfx_covertype_continuous_training"
MODEL_NAME = "tfx_covertype_classifier"
DATA_ROOT_URI = "gs://workshop-datasets/covertype/small"
CUSTOM_TFX_IMAGE = f"gcr.io/{PROJECT_ID}/{PIPELINE_NAME}"
RUNTIME_VERSION = "2.3"
PYTHON_VERSION = "3.7"
USE_KFP_SA = False
ENABLE_TUNING = False
%env PIPELINE_NAME={PIPELINE_NAME}
%env MODEL_NAME={MODEL_NAME}
%env DATA_ROOT_URI={DATA_ROOT_URI}
%env KUBEFLOW_TFX_IMAGE={CUSTOM_TFX_IMAGE}
%env RUNTIME_VERSION={RUNTIME_VERSION}
%env PYTHON_VERIONS={PYTHON_VERSION}
%env USE_KFP_SA={USE_KFP_SA}
%env ENABLE_TUNING={ENABLE_TUNING}
!tfx pipeline compile --engine kubeflow --pipeline_path runner.py
# TODO: Your code here to use the TFX CLI to deploy your pipeline image to AI Platform Pipelines.
# TODO: your code here to trigger a pipeline run with the TFX CLI
!tfx run list --pipeline_name {PIPELINE_NAME} --endpoint {ENDPOINT}
RUN_ID = "[YOUR RUN ID]"
!tfx run status --pipeline_name {PIPELINE_NAME} --run_id {RUN_ID} --endpoint {ENDPOINT}
ENABLE_TUNING = True
%env ENABLE_TUNING={ENABLE_TUNING}
!tfx pipeline compile --engine kubeflow --pipeline_path runner.py
# TODO: your code to update your pipeline
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Validate lab package version installation
Step2: Note
Step3: Note
Step4: The config.py module configures the default values for the environment specific settings and the default values for the pipeline runtime parameters.
Step5: CUSTOM_SERVICE_ACCOUNT - In the gcp console Click on the Navigation Menu. Navigate to IAM & Admin, then to Service Accounts and use the service account starting with prefix - 'tfx-tuner-caip-service-account'. This enables CloudTuner and the Google Cloud AI Platform extensions Tuner component to work together and allows for distributed and parallel tuning backed by AI Platform Vizier's hyperparameter search algorithm. Please see the lab setup README for setup instructions.
Step6: Set the compile time settings to first create a pipeline version without hyperparameter tuning
Step7: Compile your pipeline code
Step8: Note
Step9: Hint
Step10: To view the status of existing pipeline runs
Step11: To retrieve the status of a given run
Step12: Important
Step13: Compile your pipeline code
Step14: Deploy your pipeline container to AI Platform Pipelines with the TFX CLI
|
14,277
|
<ASSISTANT_TASK:>
Python Code:
def pretty_print_review_and_label(i):
    """Print label i and the first 80 chars of review i (reads the module-level `labels` and `reviews` lists)."""
    print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
len(reviews)
reviews[0]
labels[0]
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
import numpy as np

# Count how often each word appears overall and per sentiment label.
# (Reads the module-level `reviews` and `labels` lists, which have equal
# length — one label per review.)
bag_of_words = {}
pos_words = {}
neg_words = {}
for review, label in zip(reviews, labels):
    for word in review.split(' '):
        if word not in bag_of_words:
            # first sighting: initialize all three counters together,
            # so the per-label increments below never need a guard
            # (the original re-checked membership on every occurrence)
            bag_of_words[word] = 0
            pos_words[word] = 0
            neg_words[word] = 0
        bag_of_words[word] += 1
        if label == 'POSITIVE':
            pos_words[word] += 1
        elif label == 'NEGATIVE':
            neg_words[word] += 1

# Log positive/negative ratio for common words (overall frequency > 500).
# +1 in the denominator avoids division by zero for never-negative words.
words_pos_neg_ratio = []
for word, count in bag_of_words.items():
    if count > 500:
        pos_neg_ratio = pos_words[word] / float(neg_words[word] + 1)
        words_pos_neg_ratio.append((word, np.log(pos_neg_ratio)))
words_pos_neg_ratio.sort(key=lambda pair: pair[1], reverse=True)

print('\nTop positive words: \n')
for word, log_ratio in words_pos_neg_ratio[:10]:
    print(word, ': ', round(log_ratio, 10), sep='')

print('\nTop negative words: \n')
for word, log_ratio in words_pos_neg_ratio[-1:-11:-1]:
    print(word, ': ', round(log_ratio, 10), sep='')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lesson
Step2: Project 1
|
14,278
|
<ASSISTANT_TASK:>
Python Code:
from pandas import DataFrame, read_csv
from estnltk import Text
from estnltk.taggers import EventTagger
event_vocabulary = DataFrame([['Harv', 'sagedus'],
['tugev peavalu', 'sümptom']],
columns=['term', 'type'])
event_vocabulary = read_csv('data/event vocabulary.csv')
event_vocabulary = [{'term': 'harv', 'type': 'sagedus'},
{'term': 'tugev peavalu', 'type': 'sümptom'}]
text = Text('Tugev peavalu esineb valimis harva.')
event_tagger = EventTagger(event_vocabulary, search_method='ahocorasick', case_sensitive=False,
conflict_resolving_strategy='ALL', return_layer=True)
event_tagger.tag(text)
event_vocabulary = [
{'term': 'kaks', 'value': 2, 'type': 'väike'},
{'term': 'kümme', 'value': 10, 'type': 'keskmine'},
{'term': 'kakskümmend', 'value': 20, 'type': 'suur'},
{'term': 'kakskümmend kaks', 'value': 22, 'type': 'suur'}
]
text = Text('kakskümmend kaks')
event_tagger = EventTagger(event_vocabulary, search_method='naive', conflict_resolving_strategy='ALL', return_layer=True)
event_tagger.tag(text)
event_tagger = EventTagger(event_vocabulary, search_method='naive', conflict_resolving_strategy='MAX', return_layer=True)
event_tagger.tag(text)
event_tagger = EventTagger(event_vocabulary, search_method='naive', conflict_resolving_strategy='MIN', return_layer=True)
event_tagger.tag(text)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 1
Step2: or file event vocabulary.csv in csv format
Step3: or list of dicts
Step4: There must be one key (column) called term in event_vocabulary. That refers to the strings searched from the text. Other keys (type in this example) are optional. No key may have name start, end, wstart_raw, wend_raw, cstart, wstart, or bstart.
Step5: The attributes start and end show at which character the event starts and ends.<br>
Step6: conflict_resolving_strategy='ALL' returns all events.
Step7: conflict_resolving_strategy='MAX' returns all the events that are not contained by any other event.
Step8: conflict_resolving_strategy='MIN' returns all the events that don't contain any other event.
|
14,279
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn import datasets
from sklearn import svm
iris = datasets.load_iris()
# Split the iris data into train/test data sets with 40% reserved for testing
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.4, random_state=0)
# Build an SVC model for predicting iris classifications using training data
clf = svm.SVC(kernel='linear', C=1).fit(X_train, y_train)
# Now measure its performance with the test data
clf.score(X_test, y_test)
# We give cross_val_score a model, the entire data set and its "real" values, and the number of folds:
scores = cross_val_score(clf, iris.data, iris.target, cv=5)
# Print the accuracy for each fold:
print(scores)
# And the mean accuracy of all 5 folds:
print(scores.mean())
clf = svm.SVC(kernel='poly', C=1).fit(X_train, y_train)
scores = cross_val_score(clf, iris.data, iris.target, cv=5)
print(scores)
print(scores.mean())
# Build an SVC model for predicting iris classifications using training data
clf = svm.SVC(kernel='poly', C=1).fit(X_train, y_train)
# Now measure its performance with the test data
clf.score(X_test, y_test)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A single train/test split is made easy with the train_test_split function in the cross_validation library
Step2: K-Fold cross validation is just as easy; let's use a K of 5
Step3: Our model is even better than we thought! Can we do better? Let's try a different kernel (poly)
Step4: No! The more complex polynomial kernel produced lower accuracy than a simple linear kernel. The polynomial kernel is overfitting. But we couldn't have told that with a single train/test split
|
14,280
|
<ASSISTANT_TASK:>
Python Code:
# run h2o Kmeans
# Import h2o library
import h2o
from h2o.estimators import H2OKMeansEstimator
# init h2o cluster
h2o.init(strict_version_check=False, url="http://192.168.59.147:54321")
# load data
import pandas as pd
data = pd.read_csv("../../smalldata/chicago/chicagoAllWeather.csv")
data = data.iloc[:,[1, 2, 3, 4, 5]]
print(data.shape)
data.head()
# import time to measure elapsed time
from timeit import default_timer as timer
from datetime import timedelta
import time
start = timer()
end = timer()
print("Time:", timedelta(seconds=end-start))
data_h2o = h2o.H2OFrame(data)
# run h2o Kmeans to get good starting points
h2o_km = H2OKMeansEstimator(k=3, init="furthest", standardize=True)
start = timer()
h2o_km.train(training_frame=data_h2o)
end = timer()
user_points = h2o.H2OFrame(h2o_km.centers())
# show details
h2o_km.show()
time_km = timedelta(seconds=end-start)
print("Time:", time_km)
# run h2o constrained Kmeans
h2o_km_co = H2OKMeansEstimator(k=3, user_points=user_points, cluster_size_constraints=[1000, 2000, 1000], standardize=True)
start = timer()
h2o_km_co.train(training_frame=data_h2o)
end = timer()
# show details
h2o_km_co.show()
time_km_co = timedelta(seconds=end-start)
print("Time:", time_km_co)
from h2o.estimators.aggregator import H2OAggregatorEstimator
# original data size 5162, constraints 1000, 2000, 1000
# aggregated data size ~ 2581, constaints 500, 1000, 500
params = {
"target_num_exemplars": 2581,
"rel_tol_num_exemplars": 0.01,
"categorical_encoding": "eigen"
}
agg = H2OAggregatorEstimator(**params)
start = timer()
agg.train(training_frame=data_h2o)
data_agg = agg.aggregated_frame
# run h2o Kmeans
h2o_km_co_agg = H2OKMeansEstimator(k=3, user_points=user_points, cluster_size_constraints=[500, 1000, 500], standardize=True)
h2o_km_co_agg.train(x=["month", "day", "year", "maxTemp", "meanTemp"],training_frame=data_agg)
end = timer()
# show details
h2o_km_co_agg.show()
time_km_co_12 = timedelta(seconds=end-start)
print("Time:", time_km_co_12)
from h2o.estimators.aggregator import H2OAggregatorEstimator
# original data size 5162, constraints 1000, 2000, 1000
# aggregated data size ~ 1290, constaints 250, 500, 250
params = {
"target_num_exemplars": 1290,
"rel_tol_num_exemplars": 0.01,
"categorical_encoding": "eigen"
}
agg_14 = H2OAggregatorEstimator(**params)
start = timer()
agg_14.train(training_frame=data_h2o)
data_agg_14 = agg_14.aggregated_frame
# run h2o Kmeans
h2o_km_co_agg_14 = H2OKMeansEstimator(k=3, user_points=user_points, cluster_size_constraints=[240, 480, 240], standardize=True)
h2o_km_co_agg_14.train(x=list(range(5)),training_frame=data_agg_14)
end = timer()
# show details
h2o_km_co_agg_14.show()
time_km_co_14 = timedelta(seconds=end-start)
print("Time:", time_km_co_14)
centers_km_co = h2o_km_co.centers()
centers_km_co_agg_12 = h2o_km_co_agg.centers()
centers_km_co_agg_14 = h2o_km_co_agg_14.centers()
centers_all = pd.concat([pd.DataFrame(centers_km_co).sort_values(by=[0]), pd.DataFrame(centers_km_co_agg_12).sort_values(by=[0]), pd.DataFrame(centers_km_co_agg_14).sort_values(by=[0])])
diff_first_cluster = pd.concat([centers_all.iloc[0,:] - centers_all.iloc[3,:], centers_all.iloc[0,:] - centers_all.iloc[6,:]], axis=1, ignore_index=True).transpose()
diff_first_cluster.index = ["1/2", "1/4"]
diff_first_cluster.style.bar(subset=[0,1,2,3,4], align='mid', color=['#d65f5f', '#5fba7d'], width=90)
diff_second_cluster = pd.concat([centers_all.iloc[1,:] - centers_all.iloc[4,:], centers_all.iloc[1,:] - centers_all.iloc[7,:]], axis=1, ignore_index=True).transpose()
diff_second_cluster.index = ["1/2", "1/4"]
diff_second_cluster.style.bar(subset=[0,1,2,3,4], align='mid', color=['#d65f5f', '#5fba7d'], width=90)
diff_third_cluster = pd.concat([centers_all.iloc[2,:] - centers_all.iloc[5,:], centers_all.iloc[2,:] - centers_all.iloc[8,:]], axis=1, ignore_index=True).transpose()
diff_third_cluster.index = ["1/2", "1/4"]
diff_third_cluster.style.bar(subset=[0,1,2,3,4], color=['#d65f5f', '#5fba7d'], align="mid", width=90)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data - Chicago Weather dataset
Step2: Traditional K-means
Step3: Constrained K-means reduced data using Aggregator - changed size 1/2 of original data
Step4: Constrained K-means reduced data using Aggregator - changed size 1/4 of original data
Step5: Results
Step6: Difference between coordinates of original data and aggregated data
|
14,281
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
p, X = load_data()
assert type(X) == np.ndarray
km = KMeans()
km.fit(X)
d = km.transform(X)[:, p]
indexes = np.argsort(d)[::][:100]
closest_100_samples = X[indexes]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
14,282
|
<ASSISTANT_TASK:>
Python Code:
import h5py
import numpy as np
h5file = h5py.File("/Users/users/breddels/src/vaex/data/helmi-dezeeuw-2000-10p.hdf5", "r")
FeH = h5file["/data/FeH"]
# FeH is your regular numpy array (with some extras)
print("mean FeH", np.mean(FeH), "length", len(FeH))
print(FeH.attrs["ucd"], FeH.attrs["unit"])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: More information about a column can be found using
|
14,283
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pylab as plt
import tensorflow as tf
!pip install -U tf-hub-nightly
import tensorflow_hub as hub
from tensorflow.keras import layers
classifier_url ="https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/2" #@param {type:"string"}
IMAGE_SHAPE = (224, 224)
classifier = tf.keras.Sequential([
hub.KerasLayer(classifier_url, input_shape=IMAGE_SHAPE+(3,))
])
import numpy as np
import PIL.Image as Image
grace_hopper = tf.keras.utils.get_file('image.jpg','https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg')
grace_hopper = Image.open(grace_hopper).resize(IMAGE_SHAPE)
grace_hopper
grace_hopper = np.array(grace_hopper)/255.0
grace_hopper.shape
result = classifier.predict(grace_hopper[np.newaxis, ...])
result.shape
predicted_class = np.argmax(result[0], axis=-1)
predicted_class
labels_path = tf.keras.utils.get_file('ImageNetLabels.txt','https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
imagenet_labels = np.array(open(labels_path).read().splitlines())
plt.imshow(grace_hopper)
plt.axis('off')
predicted_class_name = imagenet_labels[predicted_class]
_ = plt.title("Prediction: " + predicted_class_name.title())
data_root = tf.keras.utils.get_file(
'flower_photos','https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1/255)
image_data = image_generator.flow_from_directory(str(data_root), target_size=IMAGE_SHAPE)
for image_batch, label_batch in image_data:
print("Image batch shape: ", image_batch.shape)
print("Label batch shape: ", label_batch.shape)
break
result_batch = classifier.predict(image_batch)
result_batch.shape
predicted_class_names = imagenet_labels[np.argmax(result_batch, axis=-1)]
predicted_class_names
plt.figure(figsize=(10,9))
plt.subplots_adjust(hspace=0.5)
for n in range(30):
plt.subplot(6,5,n+1)
plt.imshow(image_batch[n])
plt.title(predicted_class_names[n])
plt.axis('off')
_ = plt.suptitle("ImageNet predictions")
feature_extractor_url = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/2" #@param {type:"string"}
feature_extractor_layer = hub.KerasLayer(feature_extractor_url,
input_shape=(224,224,3))
feature_batch = feature_extractor_layer(image_batch)
print(feature_batch.shape)
feature_extractor_layer.trainable = False
model = tf.keras.Sequential([
feature_extractor_layer,
layers.Dense(image_data.num_classes, activation='softmax')
])
model.summary()
predictions = model(image_batch)
predictions.shape
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss='categorical_crossentropy',
metrics=['acc'])
class CollectBatchStats(tf.keras.callbacks.Callback):
    """Keras callback that records per-batch loss and accuracy.

    Metrics are reset after every batch so each recorded value reflects a
    single batch rather than a running average over the epoch.
    """
    # NOTE(review): consider calling super().__init__() here as well.
    def __init__(self):
        self.batch_losses = []  # loss recorded for each training batch
        self.batch_acc = []     # accuracy recorded for each training batch

    def on_train_batch_end(self, batch, logs=None):
        self.batch_losses.append(logs['loss'])
        self.batch_acc.append(logs['acc'])
        # Reset so the next batch's metrics are not averaged with this one.
        self.model.reset_metrics()
steps_per_epoch = np.ceil(image_data.samples/image_data.batch_size)
batch_stats_callback = CollectBatchStats()
history = model.fit_generator(image_data, epochs=2,
steps_per_epoch=steps_per_epoch,
callbacks = [batch_stats_callback])
plt.figure()
plt.ylabel("Loss")
plt.xlabel("Training Steps")
plt.ylim([0,2])
plt.plot(batch_stats_callback.batch_losses)
plt.figure()
plt.ylabel("Accuracy")
plt.xlabel("Training Steps")
plt.ylim([0,1])
plt.plot(batch_stats_callback.batch_acc)
class_names = sorted(image_data.class_indices.items(), key=lambda pair:pair[1])
class_names = np.array([key.title() for key, value in class_names])
class_names
predicted_batch = model.predict(image_batch)
predicted_id = np.argmax(predicted_batch, axis=-1)
predicted_label_batch = class_names[predicted_id]
label_id = np.argmax(label_batch, axis=-1)
plt.figure(figsize=(10,9))
plt.subplots_adjust(hspace=0.5)
for n in range(30):
plt.subplot(6,5,n+1)
plt.imshow(image_batch[n])
color = "green" if predicted_id[n] == label_id[n] else "red"
plt.title(predicted_label_batch[n].title(), color=color)
plt.axis('off')
_ = plt.suptitle("Model predictions (green: correct, red: incorrect)")
import time
t = time.time()
export_path = "/tmp/saved_models/{}".format(int(t))
model.save(export_path, save_format='tf')
export_path
reloaded = tf.keras.models.load_model(export_path)
result_batch = model.predict(image_batch)
reloaded_result_batch = reloaded.predict(image_batch)
abs(reloaded_result_batch - result_batch).max()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 텐서플로 허브와 전이학습
Step2: ImageNet 분류기
Step3: 싱글 이미지 실행시키기
Step4: 차원 배치를 추가하세요, 그리고 이미지를 모델에 통과시키세요.
Step5: 그 결과는 로지트의 1001 요소 벡터입니다. 이는 이미지에 대한 각각의 클래스 확률을 계산합니다.
Step6: 예측 해독하기
Step7: 간단한 전이 학습
Step8: 우리의 모델에 이 데이터를 가장 간단하게 로딩 하는 방법은 tf.keras.preprocessing.image.ImageDataGenerator를 사용하는 것이고,
Step9: 결과로 나온 오브젝트는 image_batch와 label_batch를 같이 리턴 하는 반복자입니다.
Step10: 이미지 배치에 대한 분류기를 실행해보자
Step11: 얼마나 많은 예측들이 이미지에 맞는지 검토해봅시다
Step12: 이미지 속성을 가진 LICENSE.txt 파일을 보세요.
Step13: 특성 추출기를 만들어봅시다.
Step14: 이 것은 각각의 이미지마다 길이가 1280인 벡터가 반환됩니다
Step15: 특성 추출기 계층에 있는 변수들을 굳히면, 학습은 오직 새로운 분류 계층만 변경시킬 수 있습니다.
Step16: 분류 head를 붙이세요.
Step17: 모델을 학습시키세요
Step18: 이제 모델을 학습시키기 위해 .fit방법을 사용하세요.
Step19: 지금부터, 단순한 학습 반복이지만, 우리는 항상 모델이 프로세스를 만드는 중이라는 것을 알 수 있습니다.
Step20: 예측을 확인하세요
Step21: 모델을 통해 이미지 배치를 실행시키세요. 그리고 인덱스들을 클래스 이름으로 바꾸세요.
Step22: 결과를 계획하세요
Step23: 당신의 모델을 내보내세요
Step24: 이제 우리는 그것을 새롭게 로딩 할 수 있고, 이는 같은 결과를 줄 것입니다
|
14,284
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
import ipywidgets as widgets
from IPython.display import display
L = 200
dx = .5
U = lambda x:(0.*x)
buttonrunsim = widgets.Button(description="Simulate")
buttongenerateu = widgets.Button(description="Generate U")
outwdt = widgets.Output()
centropaquete = widgets.FloatSlider(value=-3*L/4,min=-L,max=L,step=1,
description='centro del paquete',
ensure_option=True,
disabled=False)
anchopaquete = widgets.FloatSlider(value=L/8,min=1,max=L,step=1,
description='ancho del paquete',
ensure_option=True,
disabled=False)
valuek0 = widgets.FloatSlider(value=0,min=-np.pi/dx,max=np.pi/dx,step=.1,
description='k0',
ensure_option=True,
disabled=False)
valueV0 = widgets.FloatSlider(value=0,min=-4,max=4,step=.1,
description='V0',
ensure_option=True,
disabled=False)
potencial = widgets.Dropdown(options=["Lineal","Cuadrado","Rampa","Cuadrático","Gaussiano"],
description="Forma del Potencial",
ensure_option=True,
disabled=False)
valueW = widgets.FloatSlider(value=1.,min=1.,max=300.,step=.5,
description='Alcance potencial',
ensure_option=True,
disabled=False)
items = [[widgets.Label("Paquete"),centropaquete,anchopaquete,valuek0,buttonrunsim,
widgets.Label("Potencial"),valueW,valueV0,potencial,buttongenerateu],[outwdt]]
widgets.HBox([widgets.VBox(it) for it in items])
# Para fijar parámetros a mano
#### Parámetros de la discretización
dx = .5
dt = 10.
L = 200
#### Estado inicial
a = 20.
x0 = -L/3
k0 = 10./a
#### Potencial
U0= -1.
w = 10.
import numpy.fft as fft
import numpy.linalg as la
############ Inicializar #############
def init_evolop(L=L,dx=dx,dt=dt,pot=None):
    """Build the discrete time-evolution operator exp(-i*H*dt).

    H = P^2/2 + U(x) on a uniform grid of 2L/dx points spanning [-L, L].
    The momentum operator P is constructed spectrally from the discrete
    Fourier transform.  Results are stored in module globals (xs, P, ham,
    evol_op) so the plotting/simulation callbacks can reuse them.

    pot: callable mapping grid positions to potential values, or None for
         a free particle.
    Returns the dense evolution-operator matrix (also kept in `evol_op`).
    """
    global rho0
    global psi
    global psi0
    global xs
    global evol_op
    global P, ham
    # Spatial grid.
    xs = np.linspace(-L,L,int(2*L/dx))
    ## Build the evolution operator
    nhalf = int(L/dx)
    # Kinetic energy: FFT wavenumbers ordered [0..nhalf-1, -nhalf..-1].
    ks = np.pi/L*np.array([(1.*q) if q<nhalf else (q-2*nhalf) for q in range(2*nhalf)])
    # Columns of the inverse-DFT matrix with unitary normalisation.
    Uft = np.array([fft.ifft(v)*np.sqrt(2*nhalf) for v in np.eye(2*nhalf)])
    # Momentum operator in the position representation: P = F^H diag(k) F.
    P = (ks*(Uft).transpose()).dot(Uft.conj())
    ham = .5*P.dot(P)
    # Potential energy (zero everywhere when pot is None).
    if pot is None:
        U = 0.*xs
    else:
        U = pot(xs)
    ham = ham + np.diag(U)
    # Diagonalise H and exponentiate its spectrum to obtain exp(-i*dt*H).
    ens,states = np.linalg.eigh(ham)
    evol_op = (states.conj()).dot((np.exp(-1j*dt*ens)*states).transpose())
    return evol_op
init_evolop()
def init_state(L=L,dx=dx,dt=dt,a=a,x0=x0,k0=k0):
    """Initialise the wave packet: a Gaussian of width `a` centred at `x0`
    with mean wavenumber `k0`.

    Stores xs/rho0/psi0/psi in module globals and returns the initial
    wavefunction psi.
    """
    global rho0
    global psi
    global psi0
    global xs
    global evol_op
    global P
    xs = np.linspace(-L,L,int(2*L/dx))
    # Gaussian probability density centred at x0.
    rho0 = np.array([np.exp(-(x-x0)**2/(2*a**2)) for x in xs])
    # Taper the grid edges so the density vanishes at the boundary.
    rho0[0] = 0
    rho0[-1] = .5*rho0[-2]
    rho0[1] = .5*rho0[2]
    # Normalise so that sum(rho0) * dx == 1.
    rho0 = rho0/sum(rho0)/dx
    # Wavefunction: sqrt(density) times plane-wave phase exp(i*k0*(x-x0)).
    psi0 = rho0**.5 * np.exp(1j*k0*(xs-x0))
    psi = psi0
    return psi
def Ulineal(xs):
    """Linear ramp potential: -U0 * x / w, evaluated element-wise."""
    scaled = -U0 * xs
    return scaled / w
def Ugaussiano(xs):
    """Gaussian well/barrier of amplitude U0 and characteristic width ~w."""
    width_sq = .5 * w**2
    envelope = np.exp(-xs**2 / width_sq)
    return U0 * envelope
def Uescalon(xs):
    """Square well/barrier: U0 inside |x| < w/2, zero outside."""
    half_width = .5 * w
    profile = [U0 if abs(pos) < half_width else 0 for pos in xs]
    return np.array(profile)
def Uarmonico(xs):
    """Parabolic potential U0*(1 - 4*(x/w)^2) inside |x| < w/2, zero outside."""
    half_width = .5 * w
    profile = [U0*(1-4*(pos/w)**2) if abs(pos) < half_width else 0 for pos in xs]
    return np.array(profile)
def Urampadoble(xs):
    """Triangular (double-ramp) potential inside |x| < w/2, zero outside."""
    half_width = .5 * w
    profile = [U0*(1-2*abs(pos)/w) if abs(pos) < half_width else 0 for pos in xs]
    return np.array(profile)
def make_animation_new(ts=50, progress=None):
    """Run `ts` time steps of the wave-packet simulation and return the
    animation as embeddable Jupyter HTML.

    progress: optional ipywidgets progress bar, updated once per frame.
    """
    global psi
    global xs
    global U0,k0,w,x0,a,U, evol_op
    # Fresh initial wave packet for every run.
    psi = init_state(a=a,dt=dt,k0=k0,x0=x0)
    rho0 = abs(psi)**2
    fig1 = plt.figure()
    plt.xlim(-L,L)
    plt.ylim(-abs(U0)-k0**2-.5,.5+abs(U0)+k0**2)
    # Draw the potential once; only the density curve is animated.
    plt.plot(xs, U(xs),ls="-.")
    # Density is rescaled and offset purely for display.
    density,= plt.plot(xs,50*(U0+.1)*rho0+.5*k0**2)
    def update_graphic(t,density):
        # Per-frame callback: advance psi one step (frame 0 shows the
        # initial state) and refresh the density curve.
        global psi, evol_op
        if progress is not None:
            progress.value = t
        if t>=1:
            psi = evol_op.dot(psi)
        rho = abs(psi)**2
        density.set_data(xs,50*(U0+.1)*rho+.5*k0**2)
        return density,
    line_ani = animation.FuncAnimation(fig1, update_graphic, ts, fargs=(density,),
                                       interval=100, blit=True)
    # Close the static figure so only the animation is displayed.
    plt.close()
    return HTML(line_ani.to_jshtml())
def on_buttongenerateu(b):
    """Callback for the "Generate U" button.

    Reads the potential parameters from the widgets, selects the potential
    function, rebuilds the evolution operator, and plots U(x) together with
    the spectrum of the Hamiltonian.
    """
    global U, evol_op, w, U0,ham
    # Read widget values.
    U0 = valueV0.value
    w = valueW.value
    # Map the dropdown selection to a potential function.
    if potencial.value == "Lineal":
        U = Ulineal
    elif potencial.value == "Gaussiano":
        U = Ugaussiano
    elif potencial.value == "Cuadrático":
        U = Uarmonico
    elif potencial.value == "Cuadrado":
        U = Uescalon
    elif potencial.value == "Rampa":
        U = Urampadoble
    else:
        print("No encontrado")
        return
    outwdt.clear_output()
    with outwdt:
        print("U=",U)
        print("U0=",U0)
        print("w=",w)
        # Rebuilding the evolution operator is the expensive step.
        evol_op = init_evolop(pot=U)
    outwdt.clear_output()
    with outwdt:
        # Plot the chosen potential and the Hamiltonian spectrum.
        plt.plot(xs, U(xs))
        display(plt.show())
        plt.ylim(-5,5)
        plt.xlim(0,100)
        plt.scatter(range(len(ham)),sorted(la.eigvalsh(ham)))
        plt.title("Hamiltonian Spectrum")
        display(plt.show())
def on_button_start_sim(b):
    """Callback for the "Simulate" button.

    Reads the wave-packet parameters from the sliders, runs the animation
    (showing a progress bar while it renders) and displays the result in
    the output widget.
    """
    global U0, w, k0, a, x0, U, U0, w
    k0 = valuek0.value
    a = anchopaquete.value
    x0 = centropaquete.value
    outwdt.clear_output()
    # Progress bar is advanced from inside the animation frame callback.
    progress = widgets.FloatProgress(value=0,min=0,max=50,step=1,description='Simulating:',
                                     bar_style='info',orientation='horizontal')
    with outwdt:
        print([a,x0,k0,U0,w])
        display(progress)
        result = make_animation_new(progress=progress)
    outwdt.clear_output()
    display(result)
buttonrunsim.on_click(on_button_start_sim)
buttongenerateu.on_click(on_buttongenerateu)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Código que controla las simulaciones
Step2: Rutinas que generan inicializan y generan la simulación
Step3: Rutinas que conectan los controles de la primera celda con la simulacion
|
14,285
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
graphlab.canvas.set_target('ipynb')
loans = graphlab.SFrame('lending-club-data.gl/')
loans.column_names()
loans['grade'].show()
loans['home_ownership'].show()
# safe_loans = 1 => safe
# safe_loans = -1 => risky
loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1)
loans = loans.remove_column('bad_loans')
loans['safe_loans'].show(view = 'Categorical')
features = ['grade', # grade of the loan
'sub_grade', # sub-grade of the loan
'short_emp', # one year or less of employment
'emp_length_num', # number of years of employment
'home_ownership', # home_ownership status: own, mortgage or rent
'dti', # debt to income ratio
'purpose', # the purpose of the loan
'term', # the term of the loan
'last_delinq_none', # has borrower had a delinquincy
'last_major_derog_none', # has borrower had 90 day or worse rating
'revol_util', # percent of available credit being used
'total_rec_late_fee', # total late fees received to day
]
target = 'safe_loans' # prediction target (y) (+1 means safe, -1 is risky)
# Extract the feature columns and target column
loans = loans[features + [target]]
safe_loans_raw = loans[loans[target] == +1]
risky_loans_raw = loans[loans[target] == -1]
print "Number of safe loans : %s" % len(safe_loans_raw)
print "Number of risky loans : %s" % len(risky_loans_raw)
print "Percentage of safe loans :", len(safe_loans_raw) * 1. / len(loans)
print "Percentage of risky loans :", len(risky_loans_raw) * 1. / len(loans)
# Since there are fewer risky loans than safe loans, find the ratio of the sizes
# and use that percentage to undersample the safe loans.
percentage = len(risky_loans_raw)/float(len(safe_loans_raw))
risky_loans = risky_loans_raw
safe_loans = safe_loans_raw.sample(percentage, seed=1)
# Append the risky_loans with the downsampled version of safe_loans
loans_data = risky_loans.append(safe_loans)
print "Percentage of safe loans :", len(safe_loans) / float(len(loans_data))
print "Percentage of risky loans :", len(risky_loans) / float(len(loans_data))
print "Total number of loans in our new dataset :", len(loans_data)
train_data, validation_data = loans_data.random_split(.8, seed=1)
decision_tree_model = graphlab.decision_tree_classifier.create(train_data, validation_set=None,
target = target, features = features)
small_model = graphlab.decision_tree_classifier.create(train_data, validation_set=None,
target = target, features = features, max_depth = 2)
small_model.show(view="Tree")
validation_safe_loans = validation_data[validation_data[target] == 1]
validation_risky_loans = validation_data[validation_data[target] == -1]
sample_validation_data_risky = validation_risky_loans[0:2]
sample_validation_data_safe = validation_safe_loans[0:2]
sample_validation_data = sample_validation_data_safe.append(sample_validation_data_risky)
sample_validation_data
decision_tree_model.predict(sample_validation_data)
sample_validation_data['safe_loans']
decision_tree_model.predict(sample_validation_data, output_type='probability')
small_model.predict(sample_validation_data, output_type='probability')
sample_validation_data[1]
small_model.show(view="Tree")
small_model.predict(sample_validation_data[1])
print small_model.evaluate(train_data)['accuracy']
print decision_tree_model.evaluate(train_data)['accuracy']
print small_model.evaluate(validation_data)['accuracy']
print decision_tree_model.evaluate(validation_data)['accuracy']
big_model = graphlab.decision_tree_classifier.create(train_data, validation_set=None,
target = target, features = features, max_depth = 10)
print big_model.evaluate(train_data)['accuracy']
print big_model.evaluate(validation_data)['accuracy']
predictions = decision_tree_model.predict(validation_data)
((predictions == 1) & (validation_data['safe_loans'] == -1)).sum()
((predictions == -1) & (validation_data['safe_loans'] == 1)).sum()
"{}".format(1716 * 10000 + 1656 * 20000)
"{:.2E}".format(1716 * 10000 + 1656 * 20000)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load LendingClub dataset
Step2: Exploring some features
Step3: Here, we see that we have some feature columns that have to do with grade of the loan, annual income, home ownership status, etc. Let's take a look at the distribution of loan grades in the dataset.
Step4: We can see that over half of the loan grades are assigned values B or C. Each loan is assigned one of these grades, along with a more finely discretized feature called sub_grade (feel free to explore that feature column as well!). These values depend on the loan application and credit report, and determine the interest rate of the loan. More information can be found here.
Step5: This feature describes whether the loanee is mortaging, renting, or owns a home. We can see that a small percentage of the loanees own a home.
Step6: Now, let us explore the distribution of the column safe_loans. This gives us a sense of how many safe and risky loans are present in the dataset.
Step7: You should have
Step8: What remains now is a subset of features and the target that we will use for the rest of this notebook.
Step9: Now, write some code to compute below the percentage of safe and risky loans in the dataset and validate these numbers against what was given using .show earlier in the assignment
Step10: One way to combat class imbalance is to undersample the larger class until the class distribution is approximately half and half. Here, we will undersample the larger class (safe loans) in order to balance out our dataset. This means we are throwing away many data points. We used seed=1 so everyone gets the same results.
Step11: Now, let's verify that the resulting percentage of safe and risky loans are each nearly 50%.
Step12: Note
Step13: Use decision tree to build a classifier
Step14: Visualizing a learned model
Step15: In the view that is provided by GraphLab Create, you can see each node, and each split at each node. This visualization is great for considering what happens when this model predicts the target of a new data point.
Step16: Making predictions
Step17: Explore label predictions
Step18: Quiz Question
Step19: Quiz Question
Step20: Quiz Question
Step21: Let's visualize the small tree here to do the traversing for this data point.
Step22: Note
Step23: Evaluating accuracy of the decision tree model
Step24: Checkpoint
Step25: Quiz Question
Step26: Now, let us evaluate big_model on the training set and validation set.
Step27: Checkpoint
Step28: False positives are predictions where the model predicts +1 but the true label is -1. Complete the following code block for the number of false positives
Step29: False negatives are predictions where the model predicts -1 but the true label is +1. Complete the following code block for the number of false negatives
Step30: Quiz Question
|
14,286
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from pennies.market.interpolate import CubicSplineWithNodeSens
def f(x):
    """Return sin(x): the true curve the cubic spline approximates."""
    value = np.sin(x)
    return value
x = 0.5 * np.arange(10)
y = f(x)
for i in range(len(x)):
print('({}, {}'.format(x[i],y[i]))
cs_sens = CubicSplineWithNodeSens(x, y, bc_type='clamped')
xs = np.linspace(0, 5)
plt.figure(figsize=(12,10))
plt.title('Cubic Approximation of Spline')
plt.plot(x, y, 'o', label='data')
plt.plot(xs, f(xs), label='true')
plt.plot(xs, cs_sens(xs), label='cubic')
plt.legend(loc='lower left', fontsize=20)
plt.show()
# Calculate the sensitivity of x1 to a unit move in each node in the curve (x,y)
x1 = 2.25
y_sens = cs_sens.node_derivative(x1)
plt.figure(figsize=(12, 10))
plt.title('node sensitivities at x={}'.format(x1))
#plt.plot(x, y_sens, 'o', ms=10)
plt.bar(x, y_sens, width=0.4, color='0.75', edgecolor='k')
plt.ylim(min(y_sens) - 0.1, max(y_sens)+0.1)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: y = sin(x)
Step2: Sensitivity to nodes
|
14,287
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from matplotlib import pyplot as plt
import numpy as np
import tensorflow as tf
tmpdir = tempfile.mkdtemp()
physical_devices = tf.config.list_physical_devices('GPU')
for device in physical_devices:
tf.config.experimental.set_memory_growth(device, True)
file = tf.keras.utils.get_file(
"grace_hopper.jpg",
"https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg")
img = tf.keras.utils.load_img(file, target_size=[224, 224])
plt.imshow(img)
plt.axis('off')
x = tf.keras.utils.img_to_array(img)
x = tf.keras.applications.mobilenet.preprocess_input(
x[tf.newaxis,...])
labels_path = tf.keras.utils.get_file(
'ImageNetLabels.txt',
'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
imagenet_labels = np.array(open(labels_path).read().splitlines())
pretrained_model = tf.keras.applications.MobileNet()
result_before_save = pretrained_model(x)
decoded = imagenet_labels[np.argsort(result_before_save)[0,::-1][:5]+1]
print("Result before saving:\n", decoded)
mobilenet_save_path = os.path.join(tmpdir, "mobilenet/1/")
tf.saved_model.save(pretrained_model, mobilenet_save_path)
loaded = tf.saved_model.load(mobilenet_save_path)
print(list(loaded.signatures.keys())) # ["serving_default"]
infer = loaded.signatures["serving_default"]
print(infer.structured_outputs)
labeling = infer(tf.constant(x))[pretrained_model.output_names[0]]
decoded = imagenet_labels[np.argsort(labeling)[0,::-1][:5]+1]
print("Result after saving and loading:\n", decoded)
!ls {mobilenet_save_path}
!saved_model_cli show --dir {mobilenet_save_path} --tag_set serve
!ls {mobilenet_save_path}/variables
class CustomModule(tf.Module):
    """tf.Module demonstrating what tf.saved_model.save captures: a
    tf.Variable, a traced __call__, and a fixed-signature mutator."""

    def __init__(self):
        super(CustomModule, self).__init__()
        self.v = tf.Variable(1.)

    @tf.function
    def __call__(self, x):
        # Printed only while tracing; retraced calls reuse the graph.
        print('Tracing with', x)
        return x * self.v

    @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
    def mutate(self, new_v):
        # Fixed input signature: accepts only a float32 scalar.
        self.v.assign(new_v)
module = CustomModule()
module_no_signatures_path = os.path.join(tmpdir, 'module_no_signatures')
module(tf.constant(0.))
print('Saving model...')
tf.saved_model.save(module, module_no_signatures_path)
imported = tf.saved_model.load(module_no_signatures_path)
assert imported(tf.constant(3.)).numpy() == 3
imported.mutate(tf.constant(2.))
assert imported(tf.constant(3.)).numpy() == 6
optimizer = tf.optimizers.SGD(0.05)
def train_step():
    """One SGD step pulling imported(2.) towards the target value 10.

    Demonstrates that variables restored by tf.saved_model.load remain
    trainable.  Returns the scalar loss for this step.
    """
    with tf.GradientTape() as tape:
        loss = (10. - imported(tf.constant(2.))) ** 2
    # Optimise every variable the tape touched (here: imported.v).
    variables = tape.watched_variables()
    grads = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(grads, variables))
    return loss
for _ in range(10):
# "v" approaches 5, "loss" approaches 0
print("loss={:.2f} v={:.2f}".format(train_step(), imported.v.numpy()))
loaded = tf.saved_model.load(mobilenet_save_path)
print("MobileNet has {} trainable variables: {}, ...".format(
len(loaded.trainable_variables),
", ".join([v.name for v in loaded.trainable_variables[:5]])))
trainable_variable_ids = {id(v) for v in loaded.trainable_variables}
non_trainable_variables = [v for v in loaded.variables
if id(v) not in trainable_variable_ids]
print("MobileNet also has {} non-trainable variables: {}, ...".format(
len(non_trainable_variables),
", ".join([v.name for v in non_trainable_variables[:3]])))
assert len(imported.signatures) == 0
module_with_signature_path = os.path.join(tmpdir, 'module_with_signature')
call = module.__call__.get_concrete_function(tf.TensorSpec(None, tf.float32))
tf.saved_model.save(module, module_with_signature_path, signatures=call)
imported_with_signatures = tf.saved_model.load(module_with_signature_path)
list(imported_with_signatures.signatures.keys())
module_multiple_signatures_path = os.path.join(tmpdir, 'module_with_multiple_signatures')
signatures = {"serving_default": call,
"array_input": module.__call__.get_concrete_function(tf.TensorSpec([None], tf.float32))}
tf.saved_model.save(module, module_multiple_signatures_path, signatures=signatures)
imported_with_multiple_signatures = tf.saved_model.load(module_multiple_signatures_path)
list(imported_with_multiple_signatures.signatures.keys())
class CustomModuleWithOutputName(tf.Module):
    """Module whose __call__ returns a dict, so the exported SavedModel
    signature names its output 'custom_output_name' instead of the
    generic 'output_0'."""

    def __init__(self):
        super(CustomModuleWithOutputName, self).__init__()
        self.v = tf.Variable(1.)

    @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
    def __call__(self, x):
        # Dict keys become the output tensor names in the signature.
        return {'custom_output_name': x * self.v}
module_output = CustomModuleWithOutputName()
call_output = module_output.__call__.get_concrete_function(tf.TensorSpec(None, tf.float32))
module_output_path = os.path.join(tmpdir, 'module_with_output_name')
tf.saved_model.save(module_output, module_output_path,
signatures={'serving_default': call_output})
imported_with_output_name = tf.saved_model.load(module_output_path)
imported_with_output_name.signatures['serving_default'].structured_outputs
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: SavedModel 形式の使用
Step2: 実行例として、グレース・ホッパーの画像と Keras の次元トレーニング済み画像分類モデルを使用します(使いやすいため)。カスタムモデルも使用できますが、これについては後半で説明します。
Step3: この画像の予測トップは「軍服」です。
Step4: save-path は、TensorFlow Serving が使用する規則に従っており、最後のパスコンポーネント(この場合 1/)はモデルのバージョンを指します。Tensorflow Serving のようなツールで、相対的な鮮度を区別させることができます。
Step5: インポートされるシグネチャは、必ずディクショナリを返します。シグネチャ名と出力ディクショナリキーをカスタマイズするには、「エクスポート中のシグネチャの指定」を参照してください。
Step6: SavedModel から推論を実行すると、元のモデルと同じ結果が得られます。
Step7: TensorFlow Serving での SavedModel の実行
Step8: saved_model.pb ファイルは、実際の TensorFlow プログラムまたはモデル、およびテンソル入力を受け入れてテンソル出力を生成する関数を識別する一連の名前付きシグネチャを保存します。
Step9: variables ディレクトリには、標準のトレーニングチェックポイントが含まれます(「トレーニングチェックポイントガイド」を参照してください)。
Step10: assets ディレクトリには、語彙テーブルを初期化するためのテキストファイルなど、TensorFlow グラフが使用するファイルが含まれます。この例では使用されません。
Step11: tf.Module を保存すると、すべての tf.Variable 属性、tf.function でデコレートされたメソッド、および再帰トラバースで見つかった tf.Module が保存されます(この再帰トラバースについては、「チェックポイントのチュートリアル」を参照してください)。ただし、Python の属性、関数、およびデータは失われます。つまり、tf.function が保存されても、Python コードは保存されません。
Step12: カスタムモデルの読み込みと使用
Step13: Python コードは保存されないため、新しい入力シグネチャで tf.function で呼び出しても失敗します。
Step14: 一般的な微調整
Step15: エクスポート中のシグネチャの指定
Step16: サービングシグネチャを宣言するには、signatures kwarg を使用して ConcreteFunction 指定します。単一のシグネチャを指定する場合、シグネチャキーは 'serving_default' となり、定数 tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY として保存されます。
Step17: 複数のシグネチャをエクスポートするには、シグネチャキーのディクショナリを ConcreteFunction に渡します。各シグネチャキーは 1 つの ConcreteFunction に対応します。
Step18: デフォルトでは、出力されたテンソル名は、output_0 というようにかなり一般的な名前です。出力の名前を制御するには、出力名を出力にマッピングするディクショナリを返すように tf.function を変更します。入力の名前は Python 関数の引数名から取られます。
|
14,288
|
<ASSISTANT_TASK:>
Python Code:
import os, csv, lzma
import numpy as np
import open_cp.sources.chicago
import geopandas as gpd
import pyproj
import shapely.geometry
#datadir = os.path.join("/media", "OTHERDATA")
datadir = os.path.join("..", "..", "..", "..", "Data")
open_cp.sources.chicago.set_data_directory(datadir)
polygon = open_cp.sources.chicago.get_side("South")
frame = gpd.GeoDataFrame({"name":["South Side"]})
frame.geometry = [polygon]
frame.crs = {"init":"epsg:2790"}
frame
frame.to_file("SouthSide")
def gen():
    """Yield the rows of the xz-compressed Chicago crime CSV, one list
    of fields per record (header row included)."""
    path = os.path.join(datadir, "chicago_all_dec2017.csv.xz")
    with lzma.open(path, "rt") as handle:
        for record in csv.reader(handle):
            yield record
rows = gen()
print(next(rows))
print(next(rows))
# Project lon/lat (columns 20/19) into the Illinois East state-plane CRS
# (EPSG:2790) used by the South Side polygon, so the point-in-polygon
# tests below are performed in a consistent coordinate system.
proj = pyproj.Proj({"init":"epsg:2790"})
rows = gen()
header = next(rows)

# Keep only records with coordinates, from 2016, inside the polygon.
choices = []
for row in rows:
    # BUG FIX: the original used `row[19] is ""`, an identity comparison
    # with a string literal, which is not guaranteed to detect empty
    # strings; compare by value instead.
    if row[19] == "":
        continue
    # assumes row[2] is a date string with the year at chars 6:10
    # (MM/DD/YYYY ...) — TODO confirm against the CSV header
    if row[2][6:10] != "2016":
        continue
    x, y = proj(float(row[20]), float(row[19]))
    pt = shapely.geometry.Point(x, y)
    if polygon.intersects(pt):
        choices.append(row)

# Draw 1000 distinct row indices (sorted) to sub-sample the matches.
want = np.sort(np.random.choice(len(choices), 1000, replace=False))
row = next(gen())
out = []
out.append([row[1], row[2], row[3], row[5], row[19], row[20]])
for i, row in enumerate(choices):
if i in want:
out.append([row[1], row[2], row[3], row[5], row[19], row[20]])
with open("example.csv", "w", newline="") as f:
csv.writer(f).writerows(out)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get our favourite, the southside
Step2: Process the data
|
14,289
|
<ASSISTANT_TASK:>
Python Code:
a = 10
print(a)
import time
time.sleep(10)
import sys
from ctypes import CDLL
# This will crash a Linux or Mac system
# equivalent calls can be made on Windows
# Uncomment these lines if you would like to see the segfault
# dll = 'dylib' if sys.platform == 'darwin' else 'so.6'
# libc = CDLL("libc.%s" % dll)
# libc.time(-1) # BOOM!!
print("hi, stdout")
from __future__ import print_function
print('hi, stderr', file=sys.stderr)
import time, sys
for i in range(8):
print(i)
time.sleep(0.5)
for i in range(50):
print(i)
for i in range(50):
print(2**i - 1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There are two other keyboard shortcuts for running code
Step2: If the Kernel dies you will be prompted to restart it. Here we call the low-level system libc.time routine with the wrong argument via
Step3: Cell menu
Step4: Output is asynchronous
Step5: Large outputs
Step6: Beyond a certain point, output will scroll automatically
|
14,290
|
<ASSISTANT_TASK:>
Python Code:
# Download example dataset
from msmbuilder.example_datasets import FsPeptide
fs_peptide = FsPeptide(verbose=False)
fs_peptide.cache()
# Work in a temporary directory
import tempfile
import os
os.chdir(tempfile.mkdtemp())
from msmbuilder.dataset import dataset
xyz = dataset(fs_peptide.data_dir + "/*.xtc",
topology=fs_peptide.data_dir + '/fs-peptide.pdb',
stride=10)
print("{} trajectories".format(len(xyz)))
# msmbuilder does not keep track of units! You must keep track of your
# data's timestep
to_ns = 0.5
print("with length {} ns".format(set(len(x)*to_ns for x in xyz)))
from msmbuilder.featurizer import DihedralFeaturizer
featurizer = DihedralFeaturizer(types=['phi', 'psi'])
diheds = xyz.fit_transform_with(featurizer, 'diheds/', fmt='dir-npy')
print(xyz[0].xyz.shape)
print(diheds[0].shape)
from msmbuilder.preprocessing import RobustScaler
scaler = RobustScaler()
scaled_diheds = diheds.fit_transform_with(scaler, 'scaled_diheds/', fmt='dir-npy')
print(diheds[0].shape)
print(scaled_diheds[0].shape)
from msmbuilder.decomposition import tICA
tica_model = tICA(lag_time=2, n_components=4)
# fit and transform can be done in seperate steps:
tica_model = scaled_diheds.fit_with(tica_model)
tica_trajs = scaled_diheds.transform_with(tica_model, 'ticas/', fmt='dir-npy')
print(diheds[0].shape)
print(tica_trajs[0].shape)
%matplotlib inline
import msmexplorer as msme
import numpy as np
txx = np.concatenate(tica_trajs)
_ = msme.plot_histogram(txx)
from msmbuilder.cluster import MiniBatchKMeans
clusterer = MiniBatchKMeans(n_clusters=100, random_state=42)
clustered_trajs = tica_trajs.fit_transform_with(
clusterer, 'kmeans/', fmt='dir-npy'
)
print(tica_trajs[0].shape)
print(clustered_trajs[0].shape)
from matplotlib import pyplot as plt
plt.hexbin(txx[:,0], txx[:,1], bins='log', mincnt=1, cmap='viridis')
plt.scatter(clusterer.cluster_centers_[:,0],
clusterer.cluster_centers_[:,1],
s=100, c='w')
from msmbuilder.msm import MarkovStateModel
from msmbuilder.utils import dump
msm = MarkovStateModel(lag_time=2, n_timescales=20)
msm.fit(clustered_trajs)
assignments = clusterer.partial_transform(txx)
assignments = msm.partial_transform(assignments)
msme.plot_free_energy(txx, obs=(0, 1), n_samples=10000,
pi=msm.populations_[assignments],
xlabel='tIC 1', ylabel='tIC 2')
plt.scatter(clusterer.cluster_centers_[msm.state_labels_, 0],
clusterer.cluster_centers_[msm.state_labels_, 1],
s=1e4 * msm.populations_, # size by population
c=msm.left_eigenvectors_[:, 1], # color by eigenvector
cmap="coolwarm",
zorder=3)
plt.colorbar(label='First dynamical eigenvector')
plt.tight_layout()
msm.timescales_
msme.plot_timescales(msm, n_timescales=5,
ylabel='Implied Timescales ($ns$)')
from msmbuilder.lumping import PCCAPlus
pcca = PCCAPlus.from_msm(msm, n_macrostates=4)
macro_trajs = pcca.transform(clustered_trajs)
msme.plot_free_energy(txx, obs=(0, 1), n_samples=10000,
pi=msm.populations_[assignments],
xlabel='tIC 1', ylabel='tIC 2')
plt.scatter(clusterer.cluster_centers_[msm.state_labels_, 0],
clusterer.cluster_centers_[msm.state_labels_, 1],
s=50,
c=pcca.microstate_mapping_,
zorder=3
)
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The dataset object
Step2: Featurization
Step3: Preprocessing
Step4: Intermediate kinetic model
Step5: tICA Histogram
Step6: Clustering
Step7: MSM
Step8: Macrostate Model
|
14,291
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import openmc
# --- Material definitions --------------------------------------------------
# UO2 fuel: explicit ID (1) and name; `mat` below shows auto-assigned IDs.
uo2 = openmc.Material(1, "uo2")
print(uo2)
mat = openmc.Material()
print(mat)
help(uo2.add_nuclide)
# Add nuclides to uo2 (atom fractions by default).
uo2.add_nuclide('U235', 0.03)
uo2.add_nuclide('U238', 0.97)
uo2.add_nuclide('O16', 2.0)
uo2.set_density('g/cm3', 10.0)
zirconium = openmc.Material(2, "zirconium")
zirconium.add_element('Zr', 1.0)
zirconium.set_density('g/cm3', 6.6)
water = openmc.Material(3, "h2o")
water.add_nuclide('H1', 2.0)
water.add_nuclide('O16', 1.0)
water.set_density('g/cm3', 1.0)
# Use bound-atom S(alpha, beta) scattering for hydrogen in water.
water.add_s_alpha_beta('c_H_in_H2O')
# Collect the materials and export materials.xml. Materials subclasses list,
# so append/+= work as shown.
mats = openmc.Materials([uo2, zirconium, water])
mats = openmc.Materials()
mats.append(uo2)
mats += [zirconium, water]
isinstance(mats, list)
mats.export_to_xml()
!cat materials.xml
# Replace O16 with the natural element so isotopes are expanded automatically.
water.remove_nuclide('O16')
water.add_element('O', 1.0)
mats.export_to_xml()
!cat materials.xml
!cat $OPENMC_CROSS_SECTIONS | head -n 10
print(' ...')
!cat $OPENMC_CROSS_SECTIONS | tail -n 10
# Enriched uranium: add_element can take an enrichment (weight % U235).
uo2_three = openmc.Material()
uo2_three.add_element('U', 1.0, enrichment=3.0)
uo2_three.add_element('O', 2.0)
uo2_three.set_density('g/cc', 10.0)
# --- Surfaces, regions, cells, universes -----------------------------------
# A surface splits space into half-spaces via the unary -/+ operators;
# `in` tests point membership.
sph = openmc.Sphere(R=1.0)
inside_sphere = -sph
outside_sphere = +sph
print((0,0,0) in inside_sphere, (0,0,2) in inside_sphere)
print((0,0,0) in outside_sphere, (0,0,2) in outside_sphere)
# Combine half-spaces with boolean operators (& = intersection).
z_plane = openmc.ZPlane(z0=0)
northern_hemisphere = -sph & +z_plane
northern_hemisphere.bounding_box
cell = openmc.Cell()
cell.region = northern_hemisphere
# or...
cell = openmc.Cell(region=northern_hemisphere)
cell.fill = water
universe = openmc.Universe()
universe.add_cell(cell)
# this also works
universe = openmc.Universe(cells=[cell])
universe.plot(width=(2.0, 2.0))
universe.plot(width=(2.0, 2.0), basis='xz')
universe.plot(width=(2.0, 2.0), basis='xz',
              colors={cell: 'fuchsia'})
# --- Pin-cell geometry -----------------------------------------------------
# Concentric cylinders: fuel outer radius, clad inner/outer radii (cm).
fuel_or = openmc.ZCylinder(R=0.39)
clad_ir = openmc.ZCylinder(R=0.40)
clad_or = openmc.ZCylinder(R=0.46)
fuel_region = -fuel_or
gap_region = +fuel_or & -clad_ir
clad_region = +clad_ir & -clad_or
fuel = openmc.Cell(1, 'fuel')
fuel.fill = uo2
fuel.region = fuel_region
# The gap cell is left unfilled (void).
gap = openmc.Cell(2, 'air gap')
gap.region = gap_region
clad = openmc.Cell(3, 'clad')
clad.fill = zirconium
clad.region = clad_region
# Reflective planes bound a single lattice pitch.
pitch = 1.26
left = openmc.XPlane(x0=-pitch/2, boundary_type='reflective')
right = openmc.XPlane(x0=pitch/2, boundary_type='reflective')
bottom = openmc.YPlane(y0=-pitch/2, boundary_type='reflective')
top = openmc.YPlane(y0=pitch/2, boundary_type='reflective')
water_region = +left & -right & +bottom & -top & +clad_or
moderator = openmc.Cell(4, 'moderator')
moderator.fill = water
moderator.region = water_region
# Equivalent factory helper: returns a region (not a surface), so no unary -.
box = openmc.get_rectangular_prism(width=pitch, height=pitch,
                                   boundary_type='reflective')
type(box)
water_region = box & +clad_or
root = openmc.Universe(cells=(fuel, gap, clad, moderator))
geom = openmc.Geometry()
geom.root_universe = root
# or...
geom = openmc.Geometry(root)
geom.export_to_xml()
!cat geometry.xml
# --- Source, run settings, and tallies -------------------------------------
# Isotropic point source at the origin.
point = openmc.stats.Point((0, 0, 0))
src = openmc.Source(space=point)
settings = openmc.Settings()
settings.source = src
settings.batches = 100
settings.inactive = 10
settings.particles = 1000
settings.export_to_xml()
!cat settings.xml
# Tally U235 reaction rates within the fuel cell only.
cell_filter = openmc.CellFilter(fuel)
t = openmc.Tally(1)
t.filters = [cell_filter]
t.nuclides = ['U235']
t.scores = ['total', 'fission', 'absorption', '(n,gamma)']
tallies = openmc.Tallies([t])
tallies.export_to_xml()
!cat tallies.xml
# --- Run the transport solve and plot the geometry -------------------------
openmc.run()
!cat tallies.out
p = openmc.Plot()
p.filename = 'pinplot'
p.width = (pitch, pitch)
p.pixels = (200, 200)
p.color_by = 'material'
p.colors = {uo2: 'yellow', water: 'blue'}
plots = openmc.Plots([p])
plots.export_to_xml()
!cat plots.xml
# Plot mode writes a .ppm; convert it to .png for inline display.
openmc.plot_geometry()
!convert pinplot.ppm pinplot.png
from IPython.display import Image
Image("pinplot.png")
# Convenience wrapper that does the export/plot/convert/display steps above.
openmc.plot_inline(p)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Defining Materials
Step2: On the XML side, you have no choice but to supply an ID. However, in the Python API, if you don't give an ID, one will be automatically generated for you
Step3: We see that an ID of 2 was automatically assigned. Let's now move on to adding nuclides to our uo2 material. The Material object has a method add_nuclide() whose first argument is the name of the nuclide and second argument is the atom or weight fraction.
Step4: We see that by default it assumes we want an atom fraction.
Step5: Now we need to assign a total density to the material. We'll use the set_density for this.
Step6: You may sometimes be given a material specification where all the nuclide densities are in units of atom/b-cm. In this case, you just want the density to be the sum of the constituents. In that case, you can simply run mat.set_density('sum').
Step7: An astute observer might now point out that this water material we just created will only use free-atom cross sections. We need to tell it to use an $S(\alpha,\beta)$ table so that the bound atom cross section is used at thermal energies. To do this, there's an add_s_alpha_beta() method. Note the use of the GND-style name "c_H_in_H2O".
Step8: When we go to run the transport solver in OpenMC, it is going to look for a materials.xml file. Thus far, we have only created objects in memory. To actually create a materials.xml file, we need to instantiate a Materials collection and export it to XML.
Step9: Note that Materials is actually a subclass of Python's built-in list, so we can use methods like append(), insert(), pop(), etc.
Step10: Finally, we can create the XML file with the export_to_xml() method. In a Jupyter notebook, we can run a shell command by putting ! before it, so in this case we are going to display the materials.xml file that we created.
Step11: Element Expansion
Step12: We see that now O16 and O17 were automatically added. O18 is missing because our cross sections file (which is based on ENDF/B-VII.1) doesn't have O18. If OpenMC didn't know about the cross sections file, it would have assumed that all isotopes exist.
Step13: Enrichment
Step14: Defining Geometry
Step15: Note that by default the sphere is centered at the origin so we didn't have to supply x0, y0, or z0 arguments. Strictly speaking, we could have omitted R as well since it defaults to one. To get the negative or positive half-space, we simply need to apply the - or + unary operators, respectively.
Step16: Now let's see if inside_sphere actually contains points inside the sphere
Step17: Everything works as expected! Now that we understand how to create half-spaces, we can create more complex volumes by combining half-spaces using Boolean operators
Step18: For many regions, OpenMC can automatically determine a bounding box. To get the bounding box, we use the bounding_box property of a region, which returns a tuple of the lower-left and upper-right Cartesian coordinates for the bounding box
Step19: Now that we see how to create volumes, we can use them to create a cell.
Step20: By default, the cell is not filled by any material (void). In order to assign a material, we set the fill property of a Cell.
Step21: Universes and in-line plotting
Step22: The Universe object has a plot method that will display our the universe as current constructed
Step23: By default, the plot will appear in the $x$-$y$ plane. We can change that with the basis argument.
Step24: If we have particular fondness for, say, fuchsia, we can tell the plot() method to make our cell that color.
Step25: Pin cell geometry
Step26: With the surfaces created, we can now take advantage of the built-in operators on surfaces to create regions for the fuel, the gap, and the clad
Step27: Now we can create corresponding cells that assign materials to these regions. As with materials, cells have unique IDs that are assigned either manually or automatically. Note that the gap cell doesn't have any material assigned (it is void by default).
Step28: Finally, we need to handle the coolant outside of our fuel pin. To do this, we create x- and y-planes that bound the geometry.
Step29: The water region is going to be everything outside of the clad outer radius and within the box formed as the intersection of four half-spaces.
Step30: OpenMC also includes a factory function that generates a rectangular prism that could have made our lives easier.
Step31: Pay attention here -- the object that was returned is NOT a surface. It is actually the intersection of four surface half-spaces, just like we created manually before. Thus, we don't need to apply the unary operator (-box). Instead, we can directly combine it with +clad_or.
Step32: The final step is to assign the cells we created to a universe and tell OpenMC that this universe is the "root" universe in our geometry. The Geometry is the final object that is actually exported to XML.
Step33: Starting source and settings
Step34: Now let's create a Settings object and give it the source we created along with specifying how many batches and particles we want to run.
Step35: User-defined tallies
Step36: The what is the total, fission, absorption, and (n,$\gamma$) reaction rates in $^{235}$U. By default, if we only specify what reactions, it will gives us tallies over all nuclides. We can use the nuclides attribute to name specific nuclides we're interested in.
Step37: Similar to the other files, we need to create a Tallies collection and export it to XML.
Step38: Running OpenMC
Step39: Great! OpenMC already told us our k-effective. It also spit out a file called tallies.out that shows our tallies. This is a very basic method to look at tally data; for more sophisticated methods, see other example notebooks.
Step40: Geometry plotting
Step41: With our plot created, we need to add it to a Plots collection which can be exported to XML.
Step42: Now we can run OpenMC in plotting mode by calling the plot_geometry() function. Under the hood this is calling openmc --plot.
Step43: OpenMC writes out a peculiar image with a .ppm extension. If you have ImageMagick installed, this can be converted into a more normal .png file.
Step44: We can use functionality from IPython to display the image inline in our notebook
Step45: That was a little bit cumbersome. Thankfully, OpenMC provides us with a function that does all that "boilerplate" work.
|
14,292
|
<ASSISTANT_TASK:>
Python Code:
import zipfile
from io import BytesIO
import cv2
import gdown
import matplotlib.pyplot as plt
import numpy as np
import requests
import tensorflow as tf
import tensorflow_hub as hub
from PIL import Image
from sklearn.preprocessing import MinMaxScaler
from tensorflow import keras
# Model input resolution and ViT patch size used throughout this section.
RESOLUTION = 224
PATCH_SIZE = 16
# Center-crop inputs to the model resolution.
crop_layer = keras.layers.CenterCrop(RESOLUTION, RESOLUTION)
# Channel-wise mean/std normalization in [0, 255] pixel space (these are the
# same statistics later named in1k_mean / in1k_std when de-normalizing).
norm_layer = keras.layers.Normalization(
    mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
    variance=[(0.229 * 255) ** 2, (0.224 * 255) ** 2, (0.225 * 255) ** 2],
)
# Maps [0, 255] pixel values to [-1, 1].
rescale_layer = keras.layers.Rescaling(scale=1.0 / 127.5, offset=-1)
def preprocess_image(image, model_type, size=RESOLUTION):
    """Turn a PIL image (or array) into a preprocessed batch of one.

    Step order matters: for "original_vit" the [-1, 1] rescaling happens
    before the bicubic resize, while every other model type gets mean/std
    normalization after the center crop.
    """
    batch = tf.expand_dims(np.array(image), 0)
    if model_type == "original_vit":
        batch = rescale_layer(batch)
    # Resize so the subsequent center crop keeps the usual 256/224 margin.
    edge = int((256 / 224) * size)
    batch = tf.image.resize(batch, (edge, edge), method="bicubic")
    batch = crop_layer(batch)
    if model_type != "original_vit":
        batch = norm_layer(batch)
    return batch.numpy()
def load_image_from_url(url, model_type):
    """Fetch an image over HTTP; return (raw PIL image, preprocessed batch).

    Credit: Willi Gierke
    """
    raw_bytes = requests.get(url).content
    pil_image = Image.open(BytesIO(raw_bytes))
    return pil_image, preprocess_image(pil_image, model_type)
# Download the ImageNet-1k label mapping file and load it, one label per line
# (index in this list corresponds to the model's class index).
mapping_file = keras.utils.get_file(
    origin="https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt"
)
with open(mapping_file, "r") as f:
    lines = f.readlines()
imagenet_int_to_str = [line.rstrip() for line in lines]
# Load and display the test image (preprocessed for the original ViT).
img_url = "https://dl.fbaipublicfiles.com/dino/img.png"
image, preprocessed_image = load_image_from_url(img_url, model_type="original_vit")
plt.imshow(image)
plt.axis("off")
plt.show()
def get_tfhub_model(model_url: str) -> tf.keras.Model:
    """Wrap a TF-Hub module in a Keras model whose outputs include the
    module's attention scores alongside its predictions."""
    image_input = keras.Input((RESOLUTION, RESOLUTION, 3))
    predictions, attention_scores = hub.KerasLayer(model_url)(image_input)
    return keras.Model(image_input, outputs=[predictions, attention_scores])
def get_gdrive_model(model_id: str) -> tf.keras.Model:
    """Download a zipped SavedModel from Google Drive, extract it, and wrap
    it in a Keras model that emits (outputs, attention_weights).
    """
    import os.path  # local import keeps this cell self-contained

    model_path = gdown.download(id=model_id, quiet=False)
    with zipfile.ZipFile(model_path, "r") as zip_ref:
        zip_ref.extractall()
    # Strip only the final extension (".zip"). The previous
    # `model_path.split(".")[0]` truncated at the *first* dot, which broke
    # for any download path containing other dots (e.g. "./foo.zip" -> "").
    model_name = os.path.splitext(model_path)[0]
    inputs = keras.Input((RESOLUTION, RESOLUTION, 3))
    model = keras.models.load_model(model_name, compile=False)
    outputs, attention_weights = model(inputs)
    return keras.Model(inputs, outputs=[outputs, attention_weights])
def get_model(url_or_id):
    """Dispatch on the argument form: strings containing "https" are treated
    as TF-Hub URLs, anything else as a Google Drive file id."""
    if "https" in url_or_id:
        return get_tfhub_model(url_or_id)
    return get_gdrive_model(url_or_id)
# Load the ViT-Base (ImageNet-21k pretrained, patch 16, 224px) checkpoint
# from Google Drive and classify the test image.
vit_base_i21k_patch16_224 = get_model("1mbtnliT3jRb3yJUHhbItWw8unfYZw8KJ")
print("Model loaded.")
predictions, attention_score_dict = vit_base_i21k_patch16_224.predict(
    preprocessed_image
)
predicted_label = imagenet_int_to_str[int(np.argmax(predictions))]
print(predicted_label)
def compute_distance_matrix(patch_size, num_patches, length):
    """Pairwise spatial distances between patches on a `length` x `length` grid.

    Patch index i maps to grid cell (i // length, i % length). Returns a
    (num_patches, num_patches) float array of Euclidean distances between
    every pair of cells, scaled by `patch_size` (pixels); the diagonal is 0.

    Vectorized with NumPy broadcasting instead of the original O(n^2)
    Python double loop — identical values, much faster for large grids.
    """
    idx = np.arange(num_patches)
    rows, cols = idx // length, idx % length
    row_diff = rows[:, None] - rows[None, :]
    col_diff = cols[:, None] - cols[None, :]
    return patch_size * np.sqrt(row_diff**2 + col_diff**2)
def compute_mean_attention_dist(patch_size, attention_weights, model_type):
    """Mean spatial distance attended to, per head, averaged over tokens.

    `attention_weights` has shape (batch, num_heads, num_patches, num_patches)
    including the CLS token(s); distilled models carry two such tokens.
    Because each attention row is a softmax (sums to 1), weighting the
    patch-to-patch distance matrix by the attention and summing gives an
    average attended distance per token.
    """
    cls_count = 2 if "distilled" in model_type else 1
    # Drop the CLS (and distillation) token rows/columns.
    trimmed = attention_weights[..., cls_count:, cls_count:]
    token_count = trimmed.shape[-1]
    side = int(np.sqrt(token_count))
    assert side**2 == token_count, "Num patches is not perfect square"
    distances = compute_distance_matrix(patch_size, token_count, side)
    distances = distances.reshape((1, 1, token_count, token_count))
    # Sum over the attended-to axis (average distance per token), then
    # average over all tokens.
    per_token = np.sum(trimmed * distances, axis=-1)
    return np.mean(per_token, axis=-1)
# Build the mean distances for every Transformer block.
mean_distances = {
    f"{name}_mean_dist": compute_mean_attention_dist(
        patch_size=PATCH_SIZE,
        attention_weights=attention_weight,
        model_type="original_vit",
    )
    for name, attention_weight in attention_score_dict.items()
}
# Get the number of heads from the mean distance output.
num_heads = tf.shape(mean_distances["transformer_block_0_att_mean_dist"])[-1].numpy()
# Print the shapes
print(f"Num Heads: {num_heads}.")
# Scatter one column of points per Transformer block: x = block index,
# y = mean attention distance of each head in that block.
plt.figure(figsize=(9, 9))
for idx in range(len(mean_distances)):
    mean_distance = mean_distances[f"transformer_block_{idx}_att_mean_dist"]
    x = [idx] * num_heads
    y = mean_distance[0, :]
    plt.scatter(x=x, y=y, label=f"transformer_block_{idx}")
plt.legend(loc="lower right")
plt.xlabel("Attention Head", fontsize=14)
plt.ylabel("Attention Distance", fontsize=14)
plt.title("vit_base_i21k_patch16_224", fontsize=14)
plt.grid()
plt.show()
def attention_rollout_map(image, attention_score_dict, model_type):
    """Project accumulated attention onto the input image ("attention rollout").

    Head-averaged, residual-augmented attention matrices from every
    Transformer block are multiplied together; row 0 of the result (the
    class token, given the `num_cls_tokens` slicing below) becomes a
    spatial mask over `image`.
    """
    num_cls_tokens = 2 if "distilled" in model_type else 1
    # Stack the individual attention matrices from individual Transformer blocks.
    attn_mat = tf.stack([attention_score_dict[k] for k in attention_score_dict.keys()])
    attn_mat = tf.squeeze(attn_mat, axis=1)
    # Average the attention weights across all heads.
    attn_mat = tf.reduce_mean(attn_mat, axis=1)
    # To account for residual connections, we add an identity matrix to the
    # attention matrix and re-normalize the weights.
    residual_attn = tf.eye(attn_mat.shape[1])
    aug_attn_mat = attn_mat + residual_attn
    aug_attn_mat = aug_attn_mat / tf.reduce_sum(aug_attn_mat, axis=-1)[..., None]
    aug_attn_mat = aug_attn_mat.numpy()
    # Recursively multiply the weight matrices.
    joint_attentions = np.zeros(aug_attn_mat.shape)
    joint_attentions[0] = aug_attn_mat[0]
    for n in range(1, aug_attn_mat.shape[0]):
        joint_attentions[n] = np.matmul(aug_attn_mat[n], joint_attentions[n - 1])
    # Attention from the output token to the input space.
    v = joint_attentions[-1]
    grid_size = int(np.sqrt(aug_attn_mat.shape[-1]))
    mask = v[0, num_cls_tokens:].reshape(grid_size, grid_size)
    # Normalize to [0, 1], upsample to the image size, and apply as a mask.
    mask = cv2.resize(mask / mask.max(), image.size)[..., np.newaxis]
    result = (mask * image).astype("uint8")
    return result
# Visualize the rollout map next to the input image.
attn_rollout_result = attention_rollout_map(
    image, attention_score_dict, model_type="original_vit"
)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 10))
fig.suptitle(f"Predicted label: {predicted_label}.", fontsize=20)
_ = ax1.imshow(image)
_ = ax2.imshow(attn_rollout_result)
ax1.set_title("Input Image", fontsize=16)
ax2.set_title("Attention Map", fontsize=16)
ax1.axis("off")
ax2.axis("off")
fig.tight_layout()
fig.subplots_adjust(top=1.35)
fig.show()
# Load the model.
vit_dino_base16 = get_model("16_1oDm0PeCGJ_KGBG5UKVN7TsAtiRNrN")
print("Model loaded.")
# Preprocess the same image but with normalization (the "dino" path uses
# mean/std normalization rather than [-1, 1] rescaling).
img_url = "https://dl.fbaipublicfiles.com/dino/img.png"
image, preprocessed_image = load_image_from_url(img_url, model_type="dino")
# Grab the predictions.
predictions, attention_score_dict = vit_dino_base16.predict(preprocessed_image)
def attention_heatmap(attention_score_dict, image, model_type="dino"):
    """Per-head CLS-attention heatmaps from the deepest Transformer block.

    Returns a tensor of shape (H, W, num_heads) at the image resolution,
    one upsampled attention grid per head.
    """
    num_tokens = 2 if "distilled" in model_type else 1
    # Sort the Transformer blocks in order of their depth (deepest first).
    attention_score_list = list(attention_score_dict.keys())
    attention_score_list.sort(key=lambda x: int(x.split("_")[-2]), reverse=True)
    # Patch-grid size implied by the input image resolution.
    w_featmap = image.shape[2] // PATCH_SIZE
    h_featmap = image.shape[1] // PATCH_SIZE
    attention_scores = attention_score_dict[attention_score_list[0]]
    # Derive the head count from the scores themselves (axis 1) instead of
    # relying on the module-level `num_heads` set in an earlier cell.
    num_heads = attention_scores.shape[1]
    # Attention paid by the CLS token to every patch, one row per head.
    attentions = attention_scores[0, :, 0, num_tokens:].reshape(num_heads, -1)
    # Reshape the attention scores to resemble mini patch grids.
    attentions = attentions.reshape(num_heads, w_featmap, h_featmap)
    attentions = attentions.transpose((1, 2, 0))
    # Resize the attention patches back to the image size (e.g. 14*16 = 224).
    attentions = tf.image.resize(
        attentions, size=(h_featmap * PATCH_SIZE, w_featmap * PATCH_SIZE)
    )
    return attentions
# De-normalize the image for visual clarity (undo the ImageNet-1k mean/std
# normalization applied during preprocessing, then clip to [0, 1]).
in1k_mean = tf.constant([0.485 * 255, 0.456 * 255, 0.406 * 255])
in1k_std = tf.constant([0.229 * 255, 0.224 * 255, 0.225 * 255])
preprocessed_img_orig = (preprocessed_image * in1k_std) + in1k_mean
preprocessed_img_orig = preprocessed_img_orig / 255.0
preprocessed_img_orig = tf.clip_by_value(preprocessed_img_orig, 0.0, 1.0).numpy()
# Generate the attention heatmaps.
attentions = attention_heatmap(attention_score_dict, preprocessed_img_orig)
# Plot the maps: image underneath, per-head heatmap alpha-blended on top.
fig, axes = plt.subplots(nrows=3, ncols=4, figsize=(13, 13))
img_count = 0
for i in range(3):
    for j in range(4):
        if img_count < len(attentions):
            axes[i, j].imshow(preprocessed_img_orig[0])
            axes[i, j].imshow(attentions[..., img_count], cmap="inferno", alpha=0.6)
            axes[i, j].title.set_text(f"Attention head: {img_count}")
            axes[i, j].axis("off")
            img_count += 1
# Extract the learned patch-projection kernel from the model.
projections = (
    vit_base_i21k_patch16_224.layers[1]
    .get_layer("projection")
    .get_layer("conv_projection")
    .kernel.numpy()
)
projection_dim = projections.shape[-1]
patch_h, patch_w, patch_channels = projections.shape[:-1]
# Scale the projections to [0, 1] per output dimension for display.
scaled_projections = MinMaxScaler().fit_transform(
    projections.reshape(-1, projection_dim)
)
# Reshape the scaled projections so that the leading
# three dimensions resemble an image.
scaled_projections = scaled_projections.reshape(patch_h, patch_w, patch_channels, -1)
# Visualize the first 128 filters of the learned
# projections.
fig, axes = plt.subplots(nrows=8, ncols=16, figsize=(13, 8))
img_count = 0
limit = 128
for i in range(8):
    for j in range(16):
        if img_count < limit:
            axes[i, j].imshow(scaled_projections[..., img_count])
            axes[i, j].axis("off")
            img_count += 1
fig.tight_layout()
# Visualize similarity between learned position embeddings as a dot-product
# matrix (each row: one patch position vs. all others).
position_embeddings = vit_base_i21k_patch16_224.layers[1].positional_embedding.numpy()
# Discard the batch dimension and the position embeddings of the
# cls token.
position_embeddings = position_embeddings.squeeze()[1:, ...]
similarity = position_embeddings @ position_embeddings.T
plt.imshow(similarity, cmap="inferno")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Constants
Step2: Data utilities
Step3: Load a test image and display it
Step4: Load a model
Step5: More about the model
Step6: attention_score_dict contains the attention scores (softmaxed outputs) from each
Step7: Thanks to Simon Kornblith
Step8: Inspecting the plots
Step9: Let's now use these utilities to generate an attention plot based on our previous results
Step10: Inspecting the plots
Step11: A Transformer block consists of multiple heads. Each head in a Transformer block projects
Step12: We can use the same image we used for inference with DINO and the attention_score_dict
Step13: Inspecting the plots
Step14: Inspecting the plots
|
14,293
|
<ASSISTANT_TASK:>
Python Code:
# Inspect a trained pylearn2 model `m` (loaded in an earlier cell): plot the
# monitored NLL channels with holoviews (`hl`) and matplotlib.
m.layer_names
channel = m.monitor.channels["valid_y_nll"]
hl.Curve(zip(channel.epoch_record, channel.val_record),label="valid_y_nll")
channel = m.monitor.channels["valid_y_nll"]
plt.plot(channel.epoch_record, channel.val_record)
# Zoom in on the last 40 epochs to compare train vs. validation NLL.
ch1 = m.monitor.channels["valid_y_nll"]
ch2 = m.monitor.channels["train_y_nll"]
hl.Curve(zip(ch1.epoch_record[-40:], ch1.val_record[-40:]),label="valid_y_nll")
hl.Curve(zip(ch2.epoch_record[-40:], ch2.val_record[-40:]),label="train_y_nll")
# Reload a checkpointed model and draw its curves with the project helper.
m = pylearn2.utils.serial.load(
    "/disk/scratch/neuroglycerin/models/continue_hopeful_recent.pkl")
make_curves(m,"valid_objective","valid_y_nll","train_y_nll")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hard to see whether it is still learning...
|
14,294
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Teon Brooks <teon.brooks@gmail.com>
#          Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
from mne.report import Report
from mne.datasets import sample
from mne import read_evokeds
from matplotlib import pyplot as plt
# Paths into the MNE sample dataset (downloaded on first use).
data_path = sample.data_path()
meg_path = data_path + '/MEG/sample'
subjects_dir = data_path + '/subjects'
evoked_fname = meg_path + '/sample_audvis-ave.fif'
# Build an HTML report by parsing the sample MEG folder.
report = Report(image_format='png', subjects_dir=subjects_dir,
                info_fname=evoked_fname, subject='sample',
                raw_psd=False) # use False for speed here
report.parse_folder(meg_path, on_error='ignore', mri_decim=10)
# Load the evoked data
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                      baseline=(None, 0), verbose=False)
evoked.crop(0, .2)
times = evoked.times[::4]
# Create a list of figs for the slider (one topomap per selected time point;
# close each figure immediately so it is not displayed inline).
figs = list()
for t in times:
    figs.append(evoked.plot_topomap(t, vmin=-300, vmax=300, res=100,
                                    show=False))
    plt.close(figs[-1])
report.add_slider_to_section(figs, times, 'Evoked Response',
                             image_format='png') # can also use 'svg'
# Save the report
report.save('my_report.html', overwrite=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Do standard folder parsing (this can take a couple of minutes)
Step2: Add a custom section with an evoked slider
|
14,295
|
<ASSISTANT_TASK:>
Python Code:
import pickle
import numpy as np
import matplotlib.pyplot as plt
import scipy.sparse.linalg as spsla
import tectosaur as tct
# Load the premade Wenchuan mesh (a tectosaur CombinedMesh with 'surf' and
# 'fault' subsets; coordinates in meters).
with open('wenchuan_mesh.pkl', 'rb') as f:
    m = pickle.load(f)
m.n_tris(), m.n_tris('surf'), m.n_tris('fault')
# Plot the full mesh, then just the fault surface.
plt.figure(figsize = (10,10))
plt.triplot(m.pts[:,0], m.pts[:,1], m.tris, linewidth = 0.5)
plt.gca().set_aspect('equal', adjustable = 'box')
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.show()
plt.figure(figsize = (10,10))
plt.triplot(m.pts[:,0], m.pts[:,1], m.get_tris('fault'), linewidth = 0.5)
plt.gca().set_aspect('equal', adjustable = 'box')
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.show()
# Filled + line contours of surface elevation (the z coordinate of the
# surface mesh points), 0 to 5500 m.
levels = np.linspace(0, 5500, 12)
plt.figure(figsize = (10,8))
cntf = plt.tricontourf(
    m.pts[:,0], m.pts[:,1], m.get_tris('surf'), m.pts[:,2], levels = levels
)
plt.tricontour(
    m.pts[:,0], m.pts[:,1], m.get_tris('surf'), m.pts[:,2],
    linestyles = 'solid', colors='k', linewidths = 0.25, levels = levels
)
plt.colorbar(cntf)
plt.gca().set_aspect('equal', adjustable = 'box')
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.show()
# --- Build a thrust slip field on the fault --------------------------------
# First, we get an array of (n_fault_tris, 3, 3) that contains the three
# points for the vertices of each triangle
fault_tri_pts = m.pts[m.get_tris('fault')]
# Then, we compute the (unit) normal vector per fault triangle:
fault_normals = np.cross(
    fault_tri_pts[:,2,:] - fault_tri_pts[:,0,:],
    fault_tri_pts[:,2,:] - fault_tri_pts[:,1,:]
)
fault_normals /= np.linalg.norm(fault_normals, axis = 1)[:, np.newaxis]
# Next, we project our slip vector (0, 0, 1) into the plane defined
# by the triangle normal vectors
v = [0,0,1]
tri_slip = v - fault_normals * (fault_normals.dot(v)[:, np.newaxis])
# Finally, we need a slip value for each degree of freedom. Each triangle
# has a degree of freedom located at each of its vertices. We will simply
# replicate the slip field so that the slip is the same at all three of a
# triangle's vertices. (The sign flip sets the thrust sense of motion.)
slip = -np.tile(tri_slip[:,np.newaxis,:], (1,3,1))
# We need to determine the slip field at each point rather than each degree of freedom!
pt_slip = np.zeros((m.pts.shape[0], 3))
pt_slip[m.get_tris('fault')] = slip.reshape((-1,3,3))
# Contour the x, y and z components of the slip field over the fault extent.
levels = np.linspace(-1, 1, 21)
plt.figure(figsize = (15,4))
for d in range(3):
    plt.subplot(1,3,d + 1)
    plt.title(['x', 'y', 'z'][d])
    plt.tricontourf(m.pts[:,0], m.pts[:,1], m.get_tris('fault'), pt_slip[:, d], linewidth = 0.5, levels = levels)
    plt.xlim([np.min(m.pts[m.get_tris('fault'),0]), np.max(m.pts[m.get_tris('fault'),0])])
    plt.ylim([np.min(m.pts[m.get_tris('fault'),1]), np.max(m.pts[m.get_tris('fault'),1])])
    plt.gca().set_aspect('equal', adjustable = 'box')
    plt.xlabel('x (m)')
    plt.ylabel('y (m)')
    if d == 2:
        plt.colorbar()
plt.tight_layout()
plt.show()
# --- Assemble the boundary element operators and constraints ---------------
sm = 3e10 # Shear modulus
pr = 0.25 # Poisson ratio
T = tct.RegularizedSparseIntegralOp(
    8, # The coincident quadrature order
    8, # The edge adjacent quadrature order
    8, # The vertex adjacent quadrature order
    2, # The farfield quadrature order
    5, # The nearfield quadrature order
    2.5, # The element length factor to separate near from farfield.
    'elasticRT3', # The Green's function to integrate
    'elasticRT3', #...
    [sm, pr], # The material parameters (shear modulus, poisson ratio)
    m.pts, # The mesh points
    m.tris, # The mesh triangles
    np.float32, # The float type to use. float32 is much faster on most GPUs
    # Finally, do we use a direct (dense) farfield operator or do we use the Fast Multipole Method?
    farfield_op_type = tct.TriToTriDirectFarfieldOp
    #farfield_op_type = FMMFarfieldOp(mac = 4.5, pts_per_cell = 100)
)
# Left-hand side: T plus half the mass matrix (the free-term contribution).
mass = tct.MassOp(3, m.pts, m.tris)
lhs = tct.SumOp([T, tct.MultOp(mass, 0.5)])
# Dirichlet BCs: impose the slip field on all fault triangles.
bc_cs = tct.all_bc_constraints(
    m.n_tris('surf'), # The first triangle index to apply BCs to. The first fault triangle is at index `n_surf_tris`.
    m.n_tris(), # The last triangle index to apply BCs to.
    slip.flatten() # The BC vector should be n_tris * 9 elements long.
)
continuity_cs = tct.continuity_constraints(
    m.pts, # The mesh points.
    m.tris, # The mesh triangles
    m.n_tris('surf') # How many surface triangles are there? The triangles are expected to be arranged so that the surface triangles come first. The remaining triangles are assumed to be fault triangles.
)
cs = bc_cs + continuity_cs
# Build the constraint matrix and fold the inhomogeneous part into the RHS.
cm, c_rhs, _ = tct.build_constraint_matrix(cs, lhs.shape[1])
rhs_constrained = cm.T.dot(-lhs.dot(c_rhs))
def mv(v, it = [0]):
    """Constrained matrix-vector product for GMRES: cm.T * (lhs * (cm * v)).

    The mutable default `it = [0]` is deliberate: it persists across calls
    and acts as an iteration counter for the progress printout.
    """
    it[0] += 1
    print('iteration # ' + str(it[0]))
    return cm.T.dot(lhs.dot(cm.dot(v)))
# Solve the constrained system iteratively with GMRES, printing residuals.
n = rhs_constrained.shape[0]
A = spsla.LinearOperator((n, n), matvec = mv)
gmres_out = spsla.gmres(
    A, rhs_constrained, tol = 1e-6, restart = 200,
    callback = lambda R: print('residual: ', str(R))
)
# Expand the reduced solution back to the full DOF vector (BCs included).
soln = cm.dot(gmres_out[0]) + c_rhs
# Rough per-component plots of the surface displacement field (tricontourf
# smooths across the fault trace, hence the improved figure further below).
pt_disp = np.zeros((m.pts.shape[0], 3))
pt_disp[m.get_tris('surf')] = m.get_dofs(soln, 'surf').reshape((-1,3,3))
# Center the view on the fault with a 200 km half-width.
view_center = np.mean(m.pts[m.get_tris('fault'),:].reshape((-1,3)), axis = 0)
view_R = 200000
levels = np.linspace(-1, 1, 21)
for d in range(3):
    plt.figure(figsize = (6,4))
    plt.title('$u_' + ['x', 'y', 'z'][d] + '$')
    cntf = plt.tricontourf(
        m.pts[:,0], m.pts[:,1], m.get_tris('surf'), pt_disp[:, d],
        linewidth = 0.5, levels = levels, extend = 'both'
    )
    plt.tricontour(
        m.pts[:,0], m.pts[:,1], m.get_tris('surf'), pt_disp[:, d],
        linestyles = 'solid', colors='k', linewidths = 0.5, levels = levels
    )
    plt.xlim([view_center[0] - view_R, view_center[0] + view_R])
    plt.ylim([view_center[1] - view_R, view_center[1] + view_R])
    plt.gca().set_aspect('equal', adjustable = 'box')
    plt.xlabel('x (m)')
    plt.ylabel('y (m)')
    plt.colorbar(cntf)
    plt.tight_layout()
    plt.show()
# Label each triangle by which side of the fault it touches (0 = not
# touching, 1 / 2 = the two sides), and show the labels on the surface mesh.
fault_start_idx = m.get_start('fault')
side = tct.continuity.get_side_of_fault(m.pts, m.tris, fault_start_idx)
plt.tripcolor(m.pts[:,0], m.pts[:,1], m.get_tris('surf'), side[:m.n_tris('surf')])
plt.gca().set_aspect('equal', adjustable = 'box')
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.xlim([view_center[0] - view_R / 1.5, view_center[0] + view_R / 1.5])
plt.ylim([view_center[1] - view_R / 1.5, view_center[1] + view_R / 1.5])
plt.show()
# Collect fault-triangle edges that lie on the surface (the fault trace):
# a fault triangle with exactly two vertices shared with the surface mesh
# contributes one trace edge, recorded as [(tri_idx, vert_idx), ...] pairs.
surf_verts = np.unique(m.get_tris('surf'))
surf_fault_edges = []
for i, t in enumerate(m.get_tris('fault')):
    in_surf = []
    for d in range(3):
        if t[d] in surf_verts:
            in_surf.append((i, d))
    if len(in_surf) == 2:
        surf_fault_edges.append(in_surf)
# --- Final figure: displacement with a clean discontinuity at the fault ----
view_center = np.mean(m.pts[m.get_tris('fault'),:].reshape((-1,3)), axis = 0)
view_R = 250000
cmap = 'RdBu_r'
# Separate contour levels per displacement component.
levels = [
    np.linspace(-0.5, 0.1, 11),
    np.linspace(-0.3, 0.3, 11),
    np.linspace(0, 0.7, 11)
]
for d in range(3):
    field = m.get_dofs(soln, 'surf').reshape((-1,3,3))[:,:, d]
    plt.figure(figsize = (8,6))
    plt.title('$u_' + ['x', 'y', 'z'][d] + '$')
    # This is the critical section of code. We take all the triangles except one of the fault touching
    # sides and make a contour plot with them. The combination of the two contour plots results in a
    # nice clean edge.
    for i in range(2):
        which_tris = np.where(np.logical_or(side[:fault_start_idx] == 0, side[:fault_start_idx] == i + 1))[0]
        reduced_m = tct.mesh.modify.remove_unused_pts((m.pts, m.tris[which_tris]))
        soln_vals = np.empty(reduced_m[0].shape[0])
        soln_vals[reduced_m[1]] = field[which_tris]
        cntf = plt.tricontourf(
            reduced_m[0][:,0], reduced_m[0][:,1], reduced_m[1], soln_vals,
            cmap = cmap, linewidth = 0.5, levels = levels[d], extend = 'both'
        )
        plt.tricontour(
            reduced_m[0][:,0], reduced_m[0][:,1], reduced_m[1], soln_vals,
            linestyles = 'solid', colors='k', linewidths = 0.5, levels = levels[d]
        )
    # Here, we plot the fault trace edges.
    fault_tris = m.get_tris('fault')
    for e in surf_fault_edges:
        i1, d1 = e[0]
        i2, d2 = e[1]
        pts = m.pts[[fault_tris[i1,d1], fault_tris[i2,d2]]]
        plt.plot(pts[:,0], pts[:,1], 'k-', linewidth = 2)
    plt.gca().set_aspect('equal', adjustable = 'box')
    plt.xlabel('x (m)')
    plt.ylabel('y (m)')
    plt.xlim([view_center[0] - view_R, view_center[0] + view_R])
    plt.ylim([view_center[1] - view_R, view_center[1] + view_R])
    plt.colorbar(cntf)
    plt.tight_layout()
    plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: And the premade mesh that we're going to use
Step2: m is now a CombinedMesh object which is a handy class for tracking different subsets of a mesh. Let's explore. How many elements are there in this mesh? How about in each of the subsets?
Step3: Let's plot up the mesh to get a sense for what we're dealing with! Everything is projected into UTM 48R.
Step4: We can see the fault mesh in the center, with the surface elements getting larger further from the fault. Let's zoom in and just look at the fault surface.
Step5: There are two separate fault surfaces. The western surface (on the left) is the Beichuan fault, while the eastern surface that extends far to the northeast is the Pengguan fault. This fault geometry is based on structural work by Hubbard, Shaw and Klinger (2009).
Step6: Fantastic. Next, we'll set up a sample slip field on the fault surface. For simplicity, the slip field will be exclusively thrust motion. To do that, we'll project the vertical vector $(0,0,1)$ into the plane of each triangle.
Step7: Let's plot the x, y and z components of that slip field just to get a sense of it.
Step8: Next, we'll build the boundary element matrix, T, the mass matrix, the constraint matrices and then solve the linear system. This code is almost identical to the code in the Okada example, so I won't explain it in detail. That similarity is one of the powerful aspects of Tectosaur. With almost exactly the same code we can solve a boundary element problem with a small planar fault or a large complex fault geometry including topography.
Step9: With the solution in hand, let's make a rough plot of each component of the surface displacement field.
Step10: It's exciting! There's some interesting behavior going on, especially near the fault trace. But, these figures really aren't very interpretable near the fault trace. That's primarily because the tricontourf function doesn't handle discontinuity. As a result, it tries the smooth the jump in displacement across the fault. That's why there are several contour lines bunched close together near the fault trace. Let's try to make a better figure.
Step11: Perfect! Most of the mesh is not touching the fault, so it's marked 0. The triangles on the northwestern side are marked 1 and triangles are southeastern side are marked 2. It's also nice to identify the edges in the mesh that form the fault trace
Step12: Now, let's make that figure!
|
14,296
|
<ASSISTANT_TASK:>
Python Code:
from datetime import datetime
print "Notebook last modified/run: {}".format(datetime.now())
from math import factorial
kNumBits = 1.e12 # 'n' in the equation, above.
kBER = 1.e-12 # 'p-sub-A'.
def prob(m):
    '''Probability of observing exactly m bit errors.

    Exact binomial PMF: C(n, m) * p^m * (1-p)^(n-m), with n = kNumBits and
    p = kBER taken from the module-level constants.  NOTE(review):
    factorial(kNumBits) with kNumBits = 1e12 is intractably large, so this
    exact form is only practical for tiny n.
    '''
    return pow(kBER, m) * pow((1 - kBER), (kNumBits - m)) * factorial(kNumBits) / factorial(kNumBits - m) / factorial(m)
for m in range(3):
print "P(%d):" % m, prob(m)
%matplotlib inline
from matplotlib import pyplot as plt
from time import clock
from numpy import array
# Benchmark factorial() at sizes 1k..100k (roughly log-spaced) to show how
# quickly its run time blows up with the size of the argument.
test_vals = array([1, 2, 5, 10, 20, 50, 100]) * 1e3
run_times = []
results = []
for i in test_vals:
    start = clock()
    results.append(factorial(i))
    run_times.append(clock() - start)
# NOTE(review): time.clock() was deprecated in Python 3.3 and removed in 3.8;
# time.perf_counter() is the modern replacement -- confirm the target runtime.
plt.plot(test_vals, run_times)
plt.title("Factorial function run time vs. input argument")
plt.xlabel("Input Argument")
plt.ylabel("Run Time (s)")
def prob2(m):
    '''Probability of observing m errors, approximating the binomial
    coefficient by n^m / m! (valid because m << n = kNumBits).'''
    expected_errors = kNumBits * kBER
    no_error_term = pow(1 - kBER, kNumBits - m)
    return pow(expected_errors, m) * no_error_term / factorial(m)
for m in range(3):
print "P(%d):" % m, prob2(m)
from numpy import exp
def prob3(m):
    '''Probability of observing m errors via the full Poisson approximation,
    replacing the (1 - p)^(n - m) term with exp(-n * p).'''
    rate = kNumBits * kBER
    return rate ** m * exp(-rate) / factorial(m)
for m in range(3):
print "P(%d):" % m, prob3(m)
kNumTrials = 1000000
start = clock()
for i in range(kNumTrials):
prob2(1)
print "Partial approximation took %e seconds per run." % ((clock() - start) / kNumTrials)
start = clock()
for i in range(kNumTrials):
prob3(1)
print "Full approximation took %e seconds per run." % ((clock() - start) / kNumTrials)
# Redefine our probability calculation function to take in 'n' and 'p' as arguments.
def prob2(m, n=kNumBits, p=kBER):  # By giving n and p the proper defaults, we avoid breaking older client code.
    """Calculate the probability of observing m errors in a stream of n bits, given link BER p.

    Uses approximation to eliminate large factorials (valid for m << n).
    """
    return pow(n * p, m) * pow((1 - p), (n - m)) / factorial(m)
# Define our confidence interval calculation function.
def conf_int(m_hat, n, ber):
    """Calculate the confidence interval of a BER estimation.

    Inputs:
      - m_hat  Number of errors observed.
      - n      Number of bits received.
      - ber    Estimate of link bit error ratio.

    Returns 1 minus the probability of observing m_hat or fewer errors,
    i.e. the confidence that the true BER is no worse than `ber`.
    """
    return 1 - sum(prob2(m, n, ber) for m in range(m_hat + 1))
# Calculate the actual confidence interval of our junior engineer's claim.
print "Confidence in claim, 'BER <= 1e-12', after observing no errors in 1e12 received bits:", conf_int(0, 1e12, 1.e-12)
# Plot confidence interval vs. error-free observation time.
intervals = [1, 2, 3, 4, 5, 10] # Numbers are normalized to the quantity '1/BER' bits.
cis = []
for interval in intervals:
cis.append(conf_int(0, interval * kNumBits, kBER))
plt.plot(intervals, cis)
plt.title("Confidence Interval vs. Error-free Observation Time")
plt.xlabel("Observation Time (Normalized to Tbit/BER)")
plt.ylabel("Confidence Interval")
# Plot confidence interval vs. single error observation time.
intervals = [1, 2, 3, 4, 5, 6, 10] # Numbers are normalized to the quantity '1/BER' bits.
cis = []
for interval in intervals:
cis.append(conf_int(1, interval * kNumBits, kBER))
plt.plot(intervals, cis)
plt.title("Confidence Interval vs. Single Error Observation Time")
plt.xlabel("Observation Time (Normalized to Tbit/BER)")
plt.ylabel("Confidence Interval")
# Plot confidence interval vs. observation time, for several different numbers of observed errors.
errors = [0, 1, 2, 5, 10]
intervals = range(20) # Numbers are normalized to the quantity '1/BER' bits.
ciss = []
for error in errors:
cis = []
for interval in intervals:
cis.append(conf_int(error, interval * kNumBits, kBER))
ciss.append(cis)
for (error, cis) in zip(errors, ciss):
plt.plot(intervals, cis, label="{} errors".format(error))
plt.title("Confidence Interval vs. Observation Time, by Number of Observed Errors")
plt.xlabel("Observation Time (Normalized to Tbit/BER)")
plt.ylabel("Confidence Interval")
plt.legend(loc="lower right")
bers = [1e-11, 1e-12, 1e-13]
for ber in bers:
print "BER better than {} with confidence greater than {}".format(ber, conf_int(0, 5 * kNumBits, ber))
from numpy import arange
errors = [0, 1, 2, 5, 10]
times = [5, 7, 9, 14, 21]
bers = arange(1e-15, 2e-12, 1e-14)
ciss = []
for (error, time) in zip(errors, times):
cis = []
for ber in bers:
cis.append(conf_int(error, time * kNumBits, ber))
plt.figure(1)
plt.plot(bers, cis, label="{} errors".format(error))
plt.figure(2)
plt.plot(bers, cis, label="{} errors".format(error))
ciss.append(cis)
plt.figure(1)
plt.title("Confidence Interval vs. BER Estimate")
plt.xlabel("BER Estimate")
plt.ylabel("Confidence Interval")
plt.legend(loc="lower right")
plt.axis(ymin=0.98, xmin=0.9e-12, xmax=1.1e-12)
plt.figure(2)
plt.title("Confidence Interval vs. BER Estimate")
plt.xlabel("BER Estimate")
plt.ylabel("Confidence Interval")
plt.legend(loc="lower right")
plt.axis(xmax=1e-12)
from numpy import where
ranges = []
for cis in map(array, ciss):
inrange = map(lambda x: (x > 0.70) and (x < 0.99), cis)
idxs = where(inrange)[0]
ranges.append(bers[idxs[-1]] - bers[idxs[0]])
plt.plot(times, ranges)
plt.title("Useful BER Range vs. Observation Time")
plt.xlabel("Observation Time (Normalized to Tbit/BER)")
plt.ylabel("Useful BER Range")
cis = []
for ber in bers:
cis.append(conf_int(373, 527.1e12, ber))
plt.plot(bers, cis)
plt.title("Confidence Interval vs. BER Estimate")
plt.xlabel("BER Estimate")
plt.ylabel("Confidence Interval")
from scipy.stats import poisson
# Define a new confidence interval calculation function, which uses scipy.stats.poisson.
def conf_int2(m_hat, n, ber):
    """Calculate the confidence interval of a BER estimation.

    Inputs:
      - m_hat  Number of errors observed.
      - n      Number of bits received.
      - ber    Estimate of link bit error ratio.

    Unlike conf_int(), this uses scipy's Poisson PMF, which stays
    numerically tractable even for large m_hat (no explicit factorials).
    """
    return 1 - sum(poisson.pmf(m, n * ber) for m in range(m_hat + 1))
cis = []
for ber in bers:
cis.append(conf_int2(373, 527.1e12, ber))
plt.plot(bers, cis)
plt.title("Confidence Interval vs. BER Estimate")
plt.xlabel("BER Estimate")
plt.ylabel("Confidence Interval")
plt.axis(xmin=0.5e-12, xmax=1e-12)
inrange = map(lambda x: (x > 0.70) and (x < 0.99), cis)
idxs = where(inrange)[0]
print "Useful BER estimate range: {}".format(bers[idxs[-1]] - bers[idxs[0]])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Contents <a id="contents"/>
Step2: I had to give up, after 5 hours, and interrupt the code, above. What happened?!
Step3: The problem is that the run time of the factorial function grows super-linearly with the value of its input argument, and we are sending in some really large values! We need to eliminate those large factorials.
Step4: And we have solved our run time problems; yeah!
Step5: Okay, that worked, as well. I wonder which one is faster...
Step8: Since the partial approximation runs approximately twice as fast and represents a smaller deviation from the true answer, we'll use it going forward.
Step9: And we see that the confidence interval of our young engineer's claim is only 63%!
Step10: As you can see, it requires observing 5e12 error-free bits, before we can claim that we have a 1e-12 BER, with 99% certainty.
Step11: We see that we need to extend our observation time to 6 normalized time units, in order to achieve the same level of confidence in our estimate of the BER. This is intuitively satisfying, as we observed an error afterall.
Step12: At first glance, we see what we expected
Step13: Let's see what this trend looks like when plotted, using several different values for the number of observed errors. For each value chosen, we'll adjust the observation time, so as to just provide 99% confidence in a BER estimate of 10e-12. And, in that way, we hope to be doing an "apples to apples" comparison. We'll produce two plots
Step14: From the first plot, above, we can see that we've done a good job of normalizing the observation times, as all 5 curves fall within, approximately, a +/- 0.1% range, regarding the confidence of a 1e-12 BER estimate.
Step15: Another interpretation of this is that longer observation periods yield more precise measures of system BER.
Step17: Unfortunately, we can't use our prob2() or prob3() functions with this large a number of observed errors. However, we have an alternative. The scipy.stats module contains a member, poisson, whose pmf() method computes Poisson probabilities directly, with no explicit large factorials or powers.
Step18: It appears that our system is running at a BER slightly better than 1e-12 (using our standard 99% confidence threshold). And this is what our BERTScope reports, as well.
|
14,297
|
<ASSISTANT_TASK:>
Python Code:
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras.layers import Dense, Flatten, Softmax
print(tf.__version__)
!python3 -m pip freeze | grep 'tensorflow==2\|tensorflow-gpu==2' || \
python3 -m pip install tensorflow==2
mnist = tf.keras.datasets.mnist.load_data()
(x_train, y_train), (x_test, y_test) = mnist
HEIGHT, WIDTH = x_train[0].shape
NCLASSES = tf.size(tf.unique(y_train).y)
print("Image height x width is", HEIGHT, "x", WIDTH)
tf.print("There are", NCLASSES, "classes")
IMGNO = 12
# Uncomment to see raw numerical values.
# print(x_test[IMGNO])
plt.imshow(x_test[IMGNO].reshape(HEIGHT, WIDTH));
print("The label for image number", IMGNO, "is", y_test[IMGNO])
def linear_model():
    """Build and compile a softmax linear classifier over flattened images."""
    model = Sequential()
    model.add(Flatten())
    model.add(Dense(NCLASSES))
    model.add(Softmax())
    model.compile(
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    return model
BUFFER_SIZE = 5000
BATCH_SIZE = 100
def scale(image, label):
    """Normalize pixel values from [0, 255] integers to [0, 1] floats."""
    normalized = tf.cast(image, tf.float32) / 255
    return normalized, label
def load_dataset(training=True):
    """Loads MNIST dataset into a tf.data.Dataset.

    Args:
        training: if True, return the shuffled, infinitely-repeated training
            split; otherwise return the (unshuffled, finite) test split.
    """
    (x_train, y_train), (x_test, y_test) = mnist
    x = x_train if training else x_test
    y = y_train if training else y_test
    # One-hot encode the classes
    y = tf.keras.utils.to_categorical(y, NCLASSES)
    dataset = tf.data.Dataset.from_tensor_slices((x, y))
    dataset = dataset.map(scale).batch(BATCH_SIZE)
    if training:
        dataset = dataset.shuffle(BUFFER_SIZE).repeat()
    return dataset
def create_shape_test(training):
    """Assert that one batch from load_dataset() has the expected shapes."""
    batch_images, batch_labels = next(iter(load_dataset(training=training)))
    assert batch_images.shape == (BATCH_SIZE, HEIGHT, WIDTH)
    assert batch_labels.numpy().ndim == 2
    split = 'training' if training else 'eval'
    print("Test for", split, "passed!")
NUM_EPOCHS = 10
STEPS_PER_EPOCH = 100
model = linear_model()
train_data = load_dataset()
validation_data = load_dataset(training=False)
OUTDIR = "mnist_linear/"
checkpoint_callback = ModelCheckpoint(
OUTDIR, save_weights_only=True, verbose=1)
tensorboard_callback = TensorBoard(log_dir=OUTDIR)
history = model.fit(
train_data,
validation_data=validation_data,
epochs=NUM_EPOCHS,
steps_per_epoch=STEPS_PER_EPOCH,
verbose=2,
callbacks=[checkpoint_callback, tensorboard_callback]
)
BENCHMARK_ERROR = .12
BENCHMARK_ACCURACY = 1 - BENCHMARK_ERROR
accuracy = history.history['accuracy']
val_accuracy = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
assert(accuracy[-1] > BENCHMARK_ACCURACY)
assert(val_accuracy[-1] > BENCHMARK_ACCURACY)
print("Test to beat benchmark accuracy passed!")
assert(accuracy[0] < accuracy[1])
assert(accuracy[1] < accuracy[-1])
assert(val_accuracy[0] < val_accuracy[1])
assert(val_accuracy[1] < val_accuracy[-1])
print("Test model accuracy is improving passed!")
assert(loss[0] > loss[1])
assert(loss[1] > loss[-1])
assert(val_loss[0] > val_loss[1])
assert(val_loss[1] > val_loss[-1])
print("Test loss is decreasing passed!")
image_numbers = range(0, 10, 1) # Change me, please.
def load_prediction_dataset():
    """Build a single-batch dataset of the test images chosen in image_numbers."""
    samples = (x_test[image_numbers], y_test[image_numbers])
    prediction_ds = tf.data.Dataset.from_tensor_slices(samples)
    return prediction_ds.map(scale).batch(len(image_numbers))
predicted_results = model.predict(load_prediction_dataset())
for index, prediction in enumerate(predicted_results):
predicted_value = np.argmax(prediction)
actual_value = y_test[image_numbers[index]]
if actual_value != predicted_value:
print("image number: " + str(image_numbers[index]))
print("the prediction was " + str(predicted_value))
print("the actual label is " + str(actual_value))
print("")
bad_image_number = 8
plt.imshow(x_test[bad_image_number].reshape(HEIGHT, WIDTH));
DIGIT = 0 # Change me to be an integer from 0 to 9.
LAYER = 1 # Layer 0 flattens image, so no weights
WEIGHT_TYPE = 0 # 0 for variable weights, 1 for biases
dense_layer_weights = model.layers[LAYER].get_weights()
digit_weights = dense_layer_weights[WEIGHT_TYPE][:, DIGIT]
plt.imshow(digit_weights.reshape((HEIGHT, WIDTH)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exploring the data
Step2: Each image is 28 x 28 pixels and represents a digit from 0 to 9. These images are black and white, so each pixel is a value from 0 (white) to 255 (black). Raw numbers can be hard to interpret sometimes, so we can plot the values to see the handwritten digit as an image.
Step3: Define the model
Step5: Write Input Functions
Step6: Time to train the model! The original MNIST linear classifier had an error rate of 12%. Let's use that to sanity check that our model is learning.
Step7: Evaluating Predictions
Step8: It's understandable why the poor computer would have some trouble. Some of these images are difficult for even humans to read. In fact, we can see what the computer thinks each digit looks like.
|
14,298
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
from os.path import join
from pylab import rcParams
import matplotlib.pyplot as plt
rcParams['figure.figsize'] = (13, 6)
plt.style.use('ggplot')
#import nilmtk
from nilmtk import DataSet, TimeFrame, MeterGroup, HDFDataStore
from nilmtk.disaggregate.hart_85 import Hart85
from nilmtk.disaggregate import CombinatorialOptimisation
from nilmtk.utils import print_dict, show_versions
from nilmtk.metrics import f1_score
#import seaborn as sns
#sns.set_palette("Set3", n_colors=12)
import warnings
warnings.filterwarnings("ignore") #suppress warnings, comment out if warnings required
#uncomment if required
#show_versions()
data_dir = '/Users/GJWood/nilm_gjw_data/HDF5/'
gjw = DataSet(join(data_dir, 'nilm_gjw_data.hdf5'))
print('loaded ' + str(len(gjw.buildings)) + ' buildings')
building_number=1
gjw.store.window = TimeFrame(start='2015-09-03 00:00:00+01:00', end='2015-09-05 00:00:00+01:00')
gjw.set_window = TimeFrame(start='2015-09-03 00:00:00+01:00', end='2015-09-05 00:00:00+01:00')
elec = gjw.buildings[building_number].elec
mains = elec.mains()
mains.plot()
#plt.show()
house = elec['fridge'] #only one meter so any selection will do
df = house.load().next() #load the first chunk of data into a dataframe
#df.info() #check that the data is what we want (optional)
#note the data has two columns and a time index
#df.head()
#df.tail()
#df.plot()
#plt.show()
#df.ix['2015-09-03 11:00:00+01:00':'2015-09-03 12:00:00+01:00'].plot()# select a time range and plot it
#plt.show()
h = Hart85()
h.train(mains,cols=[('power','active')])
h.steady_states
ax = mains.plot()
h.steady_states['active average'].plot(style='o', ax = ax);
plt.ylabel("Power (W)")
plt.xlabel("Time");
#plt.show()
disag_filename = join(data_dir, 'disag_gjw_hart.hdf5')
output = HDFDataStore(disag_filename, 'w')
h.disaggregate(mains,output,sample_period=1)
output.close()
disag_hart = DataSet(disag_filename)
disag_hart
disag_hart_elec = disag_hart.buildings[building_number].elec
disag_hart_elec
co = CombinatorialOptimisation()
co.train(mains,cols=[('power','active')])
co.steady_states
ax = mains.plot()
co.steady_states['active average'].plot(style='o', ax = ax);
plt.ylabel("Power (W)")
plt.xlabel("Time");
disag_filename = join(data_dir, 'disag_gjw_co.hdf5')
output = HDFDataStore(disag_filename, 'w')
co.disaggregate(mains,output,sample_period=1)
output.close()
from nilmtk.metrics import f1_score
# NOTE(review): `test_elec` is never defined anywhere in this script, so this
# cell raises NameError as written (the accompanying text says there is no
# test data for comparison).  Kept for reference only.
f1_hart= f1_score(disag_hart_elec, test_elec)
f1_hart.index = disag_hart_elec.get_labels(f1_hart.index)
f1_hart.plot(kind='barh')
plt.ylabel('appliance');
plt.xlabel('f-score');
plt.title("Hart");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: show versions for any diagnostics
Step2: Load dataset
Step3: Let us perform our analysis on a selected 2-day window
Step4: Hart Training
Step5: Hart Disaggregation
Step6: Combinatorial Optimisation training
Step7: Can't use because no test data for comparison
|
14,299
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import openpathsampling as paths
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from openpathsampling.visualize import PathTreeBuilder, PathTreeBuilder
from IPython.display import SVG, HTML
def ipynb_visualize(movevis):
    """Default settings to show a movevis in an ipynb.

    Tweaks the renderer's zoom, scaling, and font size so the move-tree SVG
    is legible when displayed inline, then returns the renderer.
    """
    view = movevis.renderer
    view.zoom = 1.5
    view.scale_y = 18
    view.scale_th = 20
    view.font_size = 0.4
    return view
old_store = paths.AnalysisStorage("mstis.nc")
#old_store = paths.Storage("mstis.nc") # if not actually doing analysis, but loading network, etc
network = old_store.networks[0]
engine = old_store.engines[0]
template = old_store.snapshots[0]
# this is how we would get it out of a simulation (although the actual simulation here has bad stats)
# first, we need the crossing probabilities, which we get when we calculate the rate
network.hist_args['max_lambda'] = { 'bin_width' : 0.02, 'bin_range' : (0.0, 0.5) }
network.hist_args['pathlength'] = { 'bin_width' : 5, 'bin_range' : (0, 150) }
rates = network.rate_matrix(old_store.steps)
# just use the analyzed network to make the bias
bias = paths.SRTISBiasFromNetwork(network)
bias.df
# For better stats, use the results that I got from a 20k MC step run
# We can create fake TCPs and force them on the network.
tcp_A = paths.analysis.LookupFunction.from_dict(
{0.2: 1.0,
0.3: 0.13293104100673198,
0.4: 0.044370838092911397,
0.5: 0.021975696374764188}
)
tcp_B = paths.analysis.LookupFunction.from_dict(
{0.2: 1.0,
0.3: 0.13293104100673198,
0.4: 0.044370838092911397,
0.5: 0.021975696374764188}
)
tcp_C = paths.analysis.LookupFunction.from_dict(
{0.2: 1.0,
0.3: 0.19485705066078274,
0.4: 0.053373003923696649,
0.5: 0.029175949467020165}
)
# load states for identification purposes
stateA = old_store.volumes['A']
stateB = old_store.volumes['B']
stateC = old_store.volumes['C']
# use the sampling transitions; in MSTIS, these are also stored in from_state
network.from_state[stateA].tcp = tcp_A
network.from_state[stateB].tcp = tcp_B
network.from_state[stateC].tcp = tcp_C
bias = paths.SRTISBiasFromNetwork(network)
bias.df
scheme = paths.SRTISScheme(network, bias=bias, engine=engine)
movevis = paths.visualize.MoveTreeBuilder()
#movevis.mover(scheme.move_decision_tree(), network.all_ensembles)
#SVG(ipynb_visualize(movevis).to_svg())
final_samp0 = old_store.steps[len(old_store.steps)-1].active[network.sampling_ensembles[-1]]
sset = paths.SampleSet([final_samp0])
storage = paths.Storage("srtis.nc", "w", use_uuid=old_store.reference_by_uuid)
storage.save(template)
srtis = paths.PathSampling(
storage=storage,
sample_set=sset,
move_scheme=scheme
)
n_steps_to_run = int(scheme.n_steps_for_trials(
mover=scheme.movers['minus'][0],
n_attempts=1
))
print n_steps_to_run
# logging creates ops_output.log file with details of what the calculation is doing
import logging.config
logging.config.fileConfig("logging.conf", disable_existing_loggers=False)
%%time
multiplier = 2
srtis.run_until(multiplier*n_steps_to_run)
#storage.close()
%%time
#storage = paths.AnalysisStorage("srtis.nc")
#scheme = storage.schemes[0]
scheme.move_summary(storage.steps)
scheme.move_summary(storage.steps, 'shooting')
scheme.move_summary(storage.steps, 'minus')
scheme.move_summary(storage.steps, 'repex')
scheme.move_summary(storage.steps, 'pathreversal')
replica = storage.samplesets[0].samples[0].replica
ensemble_trace = paths.trace_ensembles_for_replica(replica, storage.steps)
print len(ensemble_trace)
srtis_ensembles = scheme.network.sampling_ensembles+scheme.network.special_ensembles['ms_outer'].keys()
srtis_ensemble_numbers = {e : srtis_ensembles.index(e) for e in srtis_ensembles}
# this next is just for pretty printing
srtis_numbers_ensemble = {srtis_ensemble_numbers[e] : e for e in srtis_ensemble_numbers}
for k in sorted(srtis_numbers_ensemble.keys()):
print k, ":", srtis_numbers_ensemble[k].name
plt.plot([srtis_ensemble_numbers[e] for e in ensemble_trace])
count = 0
# Count the replica's moves from ensemble index 1 down to index 0 in the
# ensemble trace (consecutive-step transitions only).
for i in range(len(ensemble_trace)-1):
    [this_val, next_val] = [srtis_ensemble_numbers[ensemble_trace[k]] for k in [i,i+1]]
    if this_val == 1 and next_val == 0:
        count += 1
count
hist_numbers = [srtis_ensemble_numbers[e] for e in ensemble_trace]
bins = [i-0.5 for i in range(len(srtis_ensembles)+1)]
plt.hist(hist_numbers, bins=bins);
import pandas as pd
hist = paths.analysis.Histogram(bin_width=1.0, bin_range=[-0.5,9.5])
colnames = {i : srtis_numbers_ensemble[i].name for i in range(len(srtis_ensembles))}
df = pd.DataFrame(columns=[colnames[i] for i in colnames])
for i in range(len(hist_numbers)):
hist.add_data_to_histogram([hist_numbers[i]])
if i % 100 == 0:
normalized = hist.normalized()
local_df = pd.DataFrame([normalized.values()], index=[i], columns=[colnames[k] for k in normalized.keys()])
df = df.append(local_df)
plt.pcolormesh(df.fillna(0.0), cmap="bwr", vmin=0.0, vmax=0.2);
plt.gca().invert_yaxis()
plt.colorbar()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Single Replica TIS
Step2: Open the storage and load things from it.
Step3: One of the points of SRTIS is that we use a bias (which comes from an estimate of the crossing probability) in order to improve our sampling.
Step4: Here we actually set up the SRTIS move scheme for the given network. It only requires one line
Step5: Now we'll visualize the SRTIS move scheme.
Step6: Next we need to set up an appropriate single-replica initial sampleset. We'll take the last version of from one of the outer TIS ensembles.
Step7: Finally, we set up the new storage file and the new simulation.
Step8: From here, we'll be doing the analysis of the SRTIS run.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.