| Unnamed: 0 (int64, 0–15.9k) | cleaned_code (string, 67–124k chars, ⌀ = null) | cleaned_prompt (string, 168–30.3k chars, ⌀ = null) |
|---|---|---|
13,200
|
<ASSISTANT_TASK:>
Python Code:
try:
import OpenMRS as om
except ImportError:
# At this point, you probably haven't installed OpenMRS. You can install it by:
# sudo pip install git+https://github.com/BerryAI/Acai
# Now we are going to import OpenMRS from the source.
# Note: This assumes you are currently in the 'examples/' folder running this notebook.
import os
import sys
CWD = os.getcwd()
print('current working directory:', CWD)
sys.path.append(os.path.join(CWD, '..'))
import OpenMRS as om
example_ratings = om.data.get_example_ratings()
example_tracks = om.data.get_example_tracks()
print('These are user ids:')
print(list(example_ratings.keys())[:5])
print('These are track ids and rating scores for the first user:')
print(list(list(example_ratings.values())[0].items())[:5])
engine = om.RecommendationEngine() # or equivalently, use the following line
# engine = om.RecommendationEngine(catalog=SimpleCatalog(example_tracks))
# Feed the user ratings into the recommendation engine and train it.
engine.train(ratings=example_ratings)
one_user = engine.get_user_ids()[0]
ratings = engine.get_ratings_by_user(user_id=one_user)
print ('Ratings by user %s (5 being most favorable and 1 least favorable):' %
one_user)
for track_id, rating in list(ratings.items())[:5]:
print(' User rates %s on track %s' % (rating,
engine.catalog.get_track_by_id(track_id)))
# Recommend tracks for a user.
recommended_tracks = engine.recommend(user_id=one_user, num=10)
print('\nRecommended tracks for user %s:' % one_user)
for t in recommended_tracks:
print(t)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load example data
Step2: Start and train a recommendation engine
Step3: Now the recommendation engine knows about users and their ratings. We can retrieve and print them out.
Step4: Generate recommendations
|
13,201
|
<ASSISTANT_TASK:>
Python Code:
warnings, url = gf.get_warnings_as_json([3022, 3023, 3024], "2017-12-01", "2018-05-31", lang_key=1, simple=False, recursive_count=5)
print(url)
warnings_flattened = (flatten(w) for w in warnings)
df = pd.DataFrame(warnings_flattened)
df.head(5)
df.columns
df_a = df
# define individual columns for avalanche problem 1 (AP1)
df_a['AP1.AvalCauseId'] = np.nan
df_a['AP1.AvalCauseName'] = ''
df_a['AP1.AvalancheProblemTypeId'] = np.nan # AvalancheProblemTypeId
df_a['AP1.AvalancheProblemTypeName'] = '' # AvalancheProblemTypeName
df_a['AP1.DestructiveSizeExtId'] = np.nan # DestructiveSizeExtId
df_a['AP1.DestructiveSizeExtName'] = ''
df_a['AP1.AvalancheProblemId'] = np.nan
df_a['AP1.AvalancheTypeId'] = np.nan
df_a['AP1.AvalancheTypeName'] = ''
df_a['AvalProbabilityId'] = np.nan
df_a['AvalProbabilityName'] = ''
df_a['AvalPropagationId'] = np.nan
df_a['AvalPropagationName'] = ''
df_a['AvalTriggerSimpleId'] = np.nan
df_a['AvalTriggerSimpleName'] = ''
df_a['AP1.Score'] = np.nan # APS score defined in /config/traininng_score.json and config/test_training_score.py
#df_a.columns
df_a['MountainWeather'][23]
beaufort_scale = {'Stille/svak vind': 2, 'Bris': 4, 'Frisk bris': 5, 'Liten kuling': 6, 'Stiv kuling': 7, 'Sterk kuling': 8, 'Liten storm': 9, 'Storm': 10, 'Orkan': 12}
# define columns for meteorological parameters
df_a['Regional.PrecipitationMostExposed'] = np.nan
df_a['Regional.PrecipitationAverage'] = np.nan
df_a['Regional.WindSpeed'] = ''
df_a['Regional.WindDirection'] = ''
#df_a.columns
for index, row in df_a.iterrows():
for mt in row['MountainWeather']['MeasurementTypes']:
#print(mt)
if mt['Id']==20: # wind
#print(mt['Name'])
for st in mt['MeasurementSubTypes']:
if st['Id'] == 20:
df_a.loc[index, 'Regional.WindSpeed'] = beaufort_scale[st['Value']]
if st['Id'] == 50:
df_a.loc[index, 'Regional.WindDirection'] = st['Value']
if mt['Id']==10: # precip
for st in mt['MeasurementSubTypes']:
if st['Id'] == 60:
df_a.loc[index, 'Regional.PrecipitationMostExposed'] = st['Value']
if st['Id'] == 70:
df_a.loc[index, 'Regional.PrecipitationAverage'] = st['Value']
#print(mt['Name'])
for i in range(len(row['AvalancheProblems'])):
column_name_prefix = "AP{0}".format(row['AvalancheProblems'][i]['AvalancheProblemId'])
df_a.loc[index, '{0}.AvalCauseId'.format(column_name_prefix)] = row['AvalancheProblems'][i]['AvalCauseId']
df_a.loc[index, '{0}.AvalCauseName'.format(column_name_prefix)] = row['AvalancheProblems'][i]['AvalCauseName']
df_a.loc[index, '{0}.AvalancheProblemTypeId'.format(column_name_prefix)] = row['AvalancheProblems'][i]['AvalancheProblemTypeId']
df_a.loc[index, '{0}.AvalancheProblemTypeName'.format(column_name_prefix)] = row['AvalancheProblems'][i]['AvalancheProblemTypeName']
df_a.loc[index, '{0}.DestructiveSizeExtId'.format(column_name_prefix)] = row['AvalancheProblems'][i]['DestructiveSizeExtId']
df_a.loc[index, '{0}.DestructiveSizeExtName'.format(column_name_prefix)] = row['AvalancheProblems'][i]['DestructiveSizeExtName']
df_a.loc[index, '{0}.AvalancheProblemId'.format(column_name_prefix)] = row['AvalancheProblems'][i]['AvalancheProblemId']
df_a.loc[index, '{0}.AvalancheTypeId'.format(column_name_prefix)] = row['AvalancheProblems'][i]['AvalancheTypeId']
df_a.loc[index, '{0}.AvalancheTypeName'.format(column_name_prefix)] = row['AvalancheProblems'][i]['AvalancheTypeName']
df_a.loc[index, '{0}.AvalProbabilityId'.format(column_name_prefix)] = row['AvalancheProblems'][i]['AvalProbabilityId']
df_a.loc[index, '{0}.AvalProbabilityName'.format(column_name_prefix)] = row['AvalancheProblems'][i]['AvalProbabilityName']
df_a.loc[index, '{0}.AvalPropagationId'.format(column_name_prefix)] = row['AvalancheProblems'][i]['AvalPropagationId']
df_a.loc[index, '{0}.AvalPropagationName'.format(column_name_prefix)] = row['AvalancheProblems'][i]['AvalPropagationName']
df_a.loc[index, '{0}.AvalTriggerSimpleId'.format(column_name_prefix)] = row['AvalancheProblems'][i]['AvalTriggerSimpleId']
df_a.loc[index, '{0}.AvalTriggerSimpleName'.format(column_name_prefix)] = row['AvalancheProblems'][i]['AvalTriggerSimpleName']
df_a['Regional.WindSpeed'].unique()
# get_score(position, size, distribution, trigger, probability, dangerlevel)
ts = APSTrainingScore()
print(ts.score_dict)
print(ts.score_dict['DangerLevel'][str(int(df_a['DangerLevel'][1]))])
for index, row in df_a.iterrows():
ts.get_score(row['AP1.AvalancheProblemId'],
row['AP1.DestructiveSizeExtId'],
row['AP1.AvalPropagationId'],
row['AP1.AvalTriggerSimpleId'],
row['AP1.AvalProbabilityId'],
row['DangerLevel'])
df_a.loc[index, 'AP1.Score'] = ts.score
df_a.columns
df_a = df_a.drop(['CountyList', 'MunicipalityList'], axis=1)
df_a.to_csv('varsel_nordvestlandet_17_18.csv')
import matplotlib.pyplot as plt
plt.scatter(df_a['DangerLevel'], df_a['AP1.Score'])
import plotly.plotly as py
import plotly.graph_objs as go
df_ws = df_a[df_a['AP1.AvalancheProblemTypeId'] == 10.0]
dl1 = go.Scatter(x = df_ws[df_ws["DangerLevel"] == '1']['Regional.PrecipitationMostExposed'], y = df_ws[df_ws["DangerLevel"] == '1']['Regional.WindSpeed'], name="Low danger", mode='markers',
marker = {'size': 10, 'color': 'green'})
dl2 = go.Scatter(x = df_ws[df_ws["DangerLevel"] == '2']['Regional.PrecipitationMostExposed'], y = df_ws[df_ws["DangerLevel"] == '2']['Regional.WindSpeed'], name="Moderate danger", mode='markers',
marker = {'size': 10, 'color': 'yellow'})
dl3 = go.Scatter(x = df_ws[df_ws["DangerLevel"] == '3']['Regional.PrecipitationMostExposed'], y = df_ws[df_ws["DangerLevel"] == '3']['Regional.WindSpeed'], name="Considerable danger", mode='markers',
marker = {'size': 10, 'color': 'orange'})
dl4 = go.Scatter(x = df_ws[df_ws["DangerLevel"] == '4']['Regional.PrecipitationMostExposed'], y = df_ws[df_ws["DangerLevel"] == '4']['Regional.WindSpeed'], name="High danger", mode='markers',
marker = {'size': 10, 'color': 'red'})
dl = go.Scatter(x = df_a['Regional.PrecipitationMostExposed'], y = df_a['Regional.WindSpeed'], name="All danger+problems", mode='markers',
marker = {'size': 10, 'color': 'black'})
# Edit the layout
layout = dict(title = 'Wind vs. precipitation (wind slabs)',
xaxis = dict(title = 'Precipitation'),
yaxis = dict(title = 'Wind speed'),
)
pltdata = [dl, dl1, dl2, dl3, dl4]
fig = dict(data=pltdata, layout=layout)
py.iplot(fig, filename='precip_wind_dangerlevel')
from sklearn.cluster import KMeans
### For the purposes of this example, we store feature data from our
### dataframe `df`, in the `f1` and `f2` arrays. We combine this into
### a feature matrix `X` before entering it into the algorithm.
f1 = np.array(df_a['Regional.PrecipitationMostExposed'].values, dtype=int)
f2 = np.array(df_a['Regional.WindSpeed'].values)
f3 = np.array(df_a['AP1.AvalancheProblemTypeId'].values)
X = np.dstack((f1, f2, f3))[0]
#print(X)
kmeans = KMeans(n_clusters=4).fit(X)
kmeans.labels_
#print(len(kmeans.labels_), len(f1), len(f2))
dl_class = go.Scatter(x = f1, y = f2, name="k-means classified", mode='markers',
marker = {'size': 10, 'color': kmeans.labels_})
# Edit the layout
layout = dict(title = 'Wind vs. precipitation - classified',
xaxis = dict(title = 'Precipitation'),
yaxis = dict(title = 'Wind speed'),
)
class_data = [dl_class]
fig = dict(data=class_data, layout=layout)
py.iplot(fig, filename='class_precip_wind_dangerlevel')
dl_f3 = go.Scatter(x = f1, y = f2, name="k-means classified", mode='markers',
marker = {'size': 10, 'color': f3})
# Edit the layout
layout = dict(title = 'Wind vs. precipitation - AP1-colored',
xaxis = dict(title = 'Precipitation'),
yaxis = dict(title = 'Wind speed'),
)
ap1_data = [dl_f3]
fig = dict(data=ap1_data, layout=layout)
py.iplot(fig, filename='precip_wind_AP1')
import requests
# http://tst-h-int-api01/APSServices/TimeSeriesReader.svc/DistributionByDate/met_obs_v2.0/<værparameter>/24/<regions_id>.0/<fra_dato>/<til_dato>
#met_url = r"http://h-web03.nve.no/APSapi/TimeSeriesReader.svc/PrecipitationByDate/met_obs_v2.0/0/24/3024.0/2018-03-04/2018-03-04"
met_url = r'http://h-web03.nve.no/APSapi/TimeSeriesReader.svc/MountainWeather/3024.0/2018-03-04/no/true'
met_data = requests.get(met_url).json()
print(met_data)
df_met = pd.DataFrame(met_data)
print(df_met)
from sklearn import tree
X = [[0, 0], [2, 2]]
y = [0.5, 2.5]
clf = tree.DecisionTreeRegressor()
clf = clf.fit(X, y)
clf.predict([[1, 1]])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: See full list at http
Step2: List relevant parameters for the regression analysis
Step3: Clean and save dataframe
Step4: Plotting
Step5: Decision trees
|
13,202
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.DataFrame({'user':[1,1,2,2,3], 'time':[20,10,11,18, 15], 'amount':[10.99, 4.99, 2.99, 1.99, 10.99]})
### Output your answer into variable 'result'
def g(df):
return df.groupby('user')[['time', 'amount']].apply(lambda x: x.values.tolist()[::-1]).to_frame(name='amount-time-tuple')
result = g(df.copy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
13,203
|
<ASSISTANT_TASK:>
Python Code:
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
chars[:100]
np.max(chars)+1
def split_data(chars, batch_size, num_steps, split_frac=0.9):
"""Split character data into training and validation sets, inputs and targets for each set.
Arguments
---------
chars: character array
batch_size: Number of examples in each batch
num_steps: Number of sequence steps to keep in the input and pass to the network
split_frac: Fraction of batches to keep in the training set
Returns train_x, train_y, val_x, val_y
"""
slice_size = batch_size * num_steps
n_batches = int(len(chars) / slice_size)
# Drop the last few characters to make only full batches
x = chars[: n_batches*slice_size]
y = chars[1: n_batches*slice_size + 1]
# Split the data into batch_size slices, then stack them into a 2D matrix
x = np.stack(np.split(x, batch_size))
y = np.stack(np.split(y, batch_size))
# Now x and y are arrays with dimensions batch_size x n_batches*num_steps
# Split into training and validation sets, keep the first split_frac batches for training
split_idx = int(n_batches*split_frac)
train_x, train_y= x[:, :split_idx*num_steps], y[:, :split_idx*num_steps]
val_x, val_y = x[:, split_idx*num_steps:], y[:, split_idx*num_steps:]
return train_x, train_y, val_x, val_y
train_x, train_y, val_x, val_y = split_data(chars, 10, 50)
train_x.shape
train_x[:,:50]
def get_batch(arrs, num_steps):
batch_size, slice_size = arrs[0].shape
n_batches = int(slice_size/num_steps)
for b in range(n_batches):
yield [x[:, b*num_steps: (b+1)*num_steps] for x in arrs]
def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2,
learning_rate=0.001, grad_clip=5, sampling=False):
# When we're using this network for sampling later, we'll be passing in
# one character at a time, so providing an option for that
if sampling == True:
batch_size, num_steps = 1, 1
tf.reset_default_graph()
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
# Keep probability placeholder for drop out layers
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# One-hot encoding the input and target characters
x_one_hot = tf.one_hot(inputs, num_classes)
y_one_hot = tf.one_hot(targets, num_classes)
### Build the RNN layers
#lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden)
#cell = tf.contrib.rnn.MultiRNNCell([lstm_cell for _ in range(n_layers)])
#then
#cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(n_hidden) for _ in range(n_layers)])
# Use a basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# Add dropout to the cell
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)
initial_state = cell.zero_state(batch_size, tf.float32)
### Run the data through the RNN layers
# Run each sequence step through the RNN and collect the outputs
outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=initial_state)
final_state = state
# Reshape output so it's a bunch of rows, one output row for each step for each batch
seq_output = tf.concat(outputs, axis=1)
output = tf.reshape(seq_output, [-1, lstm_size])
# Now connect the RNN outputs to a softmax layer
with tf.variable_scope('softmax'):
softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(num_classes))
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and batch
logits = tf.matmul(output, softmax_w) + softmax_b
# Use softmax to get the probabilities for predicted characters
preds = tf.nn.softmax(logits, name='predictions')
# Reshape the targets to match the logits
y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
cost = tf.reduce_mean(loss)
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
# Export the nodes
# NOTE: I'm using a namedtuple here because I think they are cool
export_nodes = ['inputs', 'targets', 'initial_state', 'final_state',
'keep_prob', 'cost', 'preds', 'optimizer']
Graph = namedtuple('Graph', export_nodes)
local_dict = locals()
graph = Graph(*[local_dict[each] for each in export_nodes])
return graph
batch_size = 10
num_steps = 100
lstm_size = 512
num_layers = 2
learning_rate = 0.001
keep_prob = 0.5
epochs = 20
# Save every N iterations
save_every_n = 200
train_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps)
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/______.ckpt')
n_batches = int(train_x.shape[1]/num_steps)
iterations = n_batches * epochs
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):
iteration = e*n_batches + b
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.cost, model.final_state, model.optimizer],
feed_dict=feed)
loss += batch_loss
end = time.time()
print('Epoch {}/{} '.format(e+1, epochs),
'Iteration {}/{}'.format(iteration, iterations),
'Training loss: {:.4f}'.format(loss/b),
'{:.4f} sec/batch'.format((end-start)))
if (iteration%save_every_n == 0) or (iteration == iterations):
# Check performance, notice dropout has been set to 1
val_loss = []
new_state = sess.run(model.initial_state)
for x, y in get_batch([val_x, val_y], num_steps):
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 1.,
model.initial_state: new_state}
batch_loss, new_state = sess.run([model.cost, model.final_state], feed_dict=feed)
val_loss.append(batch_loss)
print('Validation loss:', np.mean(val_loss),
'Saving checkpoint!')
saver.save(sess, "checkpoints/i{}_l{}_v{:.3f}.ckpt".format(iteration, lstm_size, np.mean(val_loss)))
tf.train.get_checkpoint_state('checkpoints')
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
samples = [c for c in prime]
model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
checkpoint = "checkpoints/____.ckpt"
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
Step2: And we can see the characters encoded as integers.
Step3: Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.
Step5: Making training and validation batches
Step6: Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
Step7: Looking at the size of this array, we see that we have rows equal to the batch size. When we want to get a batch out of here, we can grab a subset of this array that contains all the rows but has a width equal to the number of steps in the sequence. The first batch looks like this
Step8: I'll write another function to grab batches out of the arrays made by split_data. Here each batch will be a sliding window on these arrays with size batch_size X num_steps. For example, if we want our network to train on a sequence of 100 characters, num_steps = 100. For the next batch, we'll shift this window the next sequence of num_steps characters. In this way we can feed batches to the network and the cell states will continue through on each batch.
Step9: Building the model
Step10: Hyperparameters
Step11: Training
Step12: Saved checkpoints
Step13: Sampling
Step14: Here, pass in the path to a checkpoint and sample from the network.
|
13,204
|
<ASSISTANT_TASK:>
Python Code:
# Standard imports
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
# x-axis range
x = np.linspace(0, 10, 100)
# Create first of two panels
plt.subplot(2,1,1) # (row, col, panel no.)
plt.plot(x, np.sin(x))
# Create second of two panels
plt.subplot(2,1,2)
plt.plot(x, np.cos(x));
# Create grid of plots
# ax will be array of two Axes objects
fig, ax = plt.subplots(2)
ax[0].plot(x, np.sin(x))
ax[1].plot(x, np.cos(x));
x = np.linspace(0, 10, 100)
plt.plot(x, np.sin(x));
plt.plot(x, np.sin(x))
plt.plot(x, np.cos(x));
plt.plot(x, x + 0, color='blue')
plt.plot(x, x + 1, color='g')
plt.plot(x, x + 2, color='0.75')
plt.plot(x, x + 3, color='#FFDD44')
plt.plot(x, x + 4, color=(1.0,0.2,0.3))
plt.plot(x, x + 5, color='chartreuse');
plt.plot(x, x + 0, '-g') # solid green
plt.plot(x, x + 1, '--c') # dashed cyan
plt.plot(x, x + 2, linestyle='-.') # dashdot
plt.plot(x, x + 3, ls='dotted'); # dotted red
# Random walk data
np.random.seed(1234)
y = np.hstack([0, np.random.standard_normal(10).cumsum()])
x = range(len(y))
plt.plot(x, y, color='m', linewidth=2.0,
marker='o', markeredgecolor='b', markersize=20,
markerfacecolor='g', fillstyle='bottom');
plt.figure(figsize=(8, 4)) # Adjusting figure size (width, height)
plt.plot(x, y)
plt.title("Random Walk with $X \sim N(0, 1)$")
plt.xlabel('step')
plt.ylabel('value')
plt.xlim(-1, 11)
plt.ylim(-2, 2);
# Import needed for generating normal probability density function (pdf) data
import scipy.stats as sps
# Create data for the two normal pdf & cdf
x = np.linspace(-4, 4, 100)
y1 = sps.norm.pdf(x)
y2 = sps.norm.cdf(x, scale=0.5)
# Plot data including (TeX) label
plt.figure(figsize=(12, 8))
plt.plot(x, y1, label='Normal pdf with \n $X \sim N(0, 1)$')
plt.plot(x, y2, label='Normal pdf with' + '\n' +
r'$X \sim N\left(0, \frac{1}{2}\right)$')
plt.legend(loc='upper left', frameon=True, framealpha=0.5,
edgecolor='black', facecolor='lightgray',
fontsize=12, labelspacing=1);
# Import Polygon functions
from matplotlib.patches import Polygon
# Create data for a normal pdf's
mu = 4
sig = 0.75
x = np.linspace(0, 8, 100)
y = sps.norm.pdf(x, loc=mu, scale=sig)
# Draw standard normal
fig, ax = plt.subplots(figsize=(12, 6))
ax.plot(x, y, linewidth=2)
ax.set_xlim(0, 8)
# Add title
plt.title(r'Extensive Example with Normal Distribution where $X \sim N(4, \dfrac{3}{4})$')
# Calculate polygon coordinates
# and add polygon shape to plot
a, b = 0, 3.5 # Integral limits
ix = np.linspace(a, b)
iy = sps.norm.pdf(ix, loc=mu, scale=sig)
verts = [(a, 0)] + list(zip(ix, iy)) + [(b, 0)]
poly = Polygon(verts, facecolor='0.8', edgecolor='0.65')
ax.add_patch(poly)
# Add normal distribution function as text
plt.text(x=6.5, y=0.45, s=r'$f(x | \mu, \sigma^2) = \frac{1}{\sqrt{2 \pi \sigma^2}}' +
r'\; \exp\left(- \frac{(x - \mu)^2}{2\sigma^2}\right)$', size=16,
horizontalalignment='center');
# Calculate area under curve
pr = sps.norm.cdf(b, loc=mu, scale=sig)
# Add annotation with area under curve
t = '$\Pr(0 \leq x \leq 3.5) = $ {0:1.3f}'.format(pr)
plt.annotate(t, xy=(3, 0.1), xytext=(0.75, 0.2), size=14,
arrowprops=dict(arrowstyle='->',
connectionstyle='angle3, angleA=0,angleB=120'));
# Generate std. normal rv
rv = np.random.normal(size=(200, 2))
# Draw scatter plot
plt.scatter(rv[:, 0], rv[:, 1]);
for marker in ['o', '.', ',', 'x', '+', 'v', '^', '<', '>', 's', 'd', 'p']:
plt.plot(np.random.rand(5), np.random.rand(5), marker,
label="marker='{0}'".format(marker))
plt.legend(fontsize=12)
plt.xlim(0, 1.6);
# RV from a gamma distribution
gamma = np.random.gamma(size=5000, shape=3, scale=2)
plt.hist(gamma, bins=50, density=True,
alpha=0.5, edgecolor='blue');
# RV from a normal distribution
norm = np.random.normal(size=5000)
plt.hist(norm, bins=50, histtype='stepfilled', alpha=0.3, density=True, cumulative=True)
plt.hist(norm, bins=50, histtype='stepfilled', alpha=0.3, density=True);
kde = sps.gaussian_kde(norm)
xx = np.linspace(-4, 4, 1000)
fig, ax = plt.subplots(figsize=(8, 6))
ax.hist(norm, bins=50, histtype='stepfilled', alpha=0.3, density=True)
ax.plot(xx, kde(xx));
# Dividend return data for Nestle, Novartis 2012-2016
NESN = (3.61, 3.43, 3.30, 3.01, 3.01)
NOVN = (4.22, 3.91, 3.04, 2.84, 3.09)
ind = np.arange(len(NESN)) # the x locations for the groups
width = 0.35 # the width of the bars
# Setup figure, axes: two plots arranged horizontally
fig, ax = plt.subplots(1, 2, figsize=(16, 5))
# Plot left [0] bar chart
rects1 = ax[0].bar(ind, NESN, width)
rects2 = ax[0].bar(ind + width, NOVN, width)
# Add text for title, legend and axes ticks
ax[0].set_title('Dividend Return (in %)')
ax[0].set_xticks(ind + width / 2)
ax[0].set_xticklabels(('2012', '2013', '2014', '2015', '2016'))
ax[0].legend((rects1[0], rects2[0]), ('NESN', 'NOVN'))
# Plot right [1] bar chart
rects3 = ax[1].bar(ind, NESN, width, align='center')
rects4 = ax[1].bar(ind, NOVN, width, bottom=NESN) # bottom used to stack bars
# Add text for title, legend and axes ticks
ax[1].set_title('Dividend Return (in %) [STACKED]')
ax[1].set_xticks(ind)
ax[1].set_xticklabels(('2012', '2013', '2014', '2015', '2016'))
ax[1].legend((rects3[1], rects4[1]), ('NESN', 'NOVN'));
# Pie chart with slices ordered/plotted counter-clockwise:
labels = ['Electrification Products', 'Robotics and Motion',
'Industrial Automation', 'Power Grids']
sizes = [1528, 1195, 824, 1021]
explode = (0, 0.1, 0, 0) # only "explode" 2nd slice (i.e. 'Robotics')
plt.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90)
plt.axis('equal'); # Equal aspect ratio ensures that pie is drawn as a circle.
for i in range(1, 7):
plt.subplot(2, 3, i)
plt.text(0.5, 0.5, str((2, 3, i)),
fontsize=18, ha='center')
# Adjust amount of height (hspace) and width (wspace)
# reserved for white space between subplots
plt.subplots_adjust(hspace=0.3, wspace=0.5)
markers = ['.', '+', 'x', '1', '2', '3']
# Draw two rows of 3 plots each with shared axes
fig, ax = plt.subplots(2, 3, sharex='col', sharey='row')
# Add main title above subplots
fig.suptitle('Main Title', fontsize=18)
# Access each plot and add text
for i in range(2):
for j in range(3):
# Create some random data
data = np.random.uniform(size=(30, 2))
# Scatterplot with text label
ax[i, j].scatter(data[:, 0], data[:, 1], marker=markers[2*j+i])
ax[i, j].text(0.5, 0.5, str((i, j)), fontsize=18,
ha='center', weight='bold');
figure_mosaic = """
AABC
AADE
"""
fig, axes = plt.subplot_mosaic(mosaic=figure_mosaic, figsize=(11, 5))
axes["B"].plot(x, y1)
axes["C"].plot(x, y2)
axes["D"].scatter(x, y1)
axes["E"].hist(y1, bins=50, density=True, alpha=0.5, edgecolor='blue')
axes["A"].text(0.1, 0.9, "A", color='blue')
axes["A"].text(0.5, 0.5, "Easy, not!?", ha="center", va="center", size=38)
axes["B"].text(1, 0.35, "B", color='blue')
axes["C"].text(1, 0.9, "C", color='blue')
axes["D"].text(1, 0.35, "D", color='blue')
axes["E"].text(0.35, 35, "E", color='blue');
plt.gcf().canvas.get_supported_filetypes_grouped()
# Load package to download share prices from web
import pandas_datareader.data as web
import pandas as pd
# List with stock isin numbers
isin = ['SIX/CH0012221716CHF', 'SIX/CH0002432174CHF', 'SIX/CH0025536027CHF',
'SIX/CH0000816824CHF', 'SIX/CH0001752309CHF', 'SIX/CH0024638212CHF']
# Download closing prices of defined shares
data = pd.DataFrame()
for tick in isin:
data[tick] = web.DataReader(tick, data_source='quandl',
start='2021-01-12', end='2022-01-11',
api_key='H494hBc5iXyUgiF4oX5y')['Price']
# Rename column names from ISIN to Ticker
data.columns = ['ABB', 'BUCN', 'BCHN', 'OERL', 'FI-N', 'SCHN']
# Show data extract
data.head()
# Plot normalized share prices
plt.figure(figsize = (16, 8))
plt.plot(data / data.iloc[-1] * 100) # Normalize prices to 100 and plot
plt.axhline(y=100, color='gray', linestyle=':') # Add dotted line at 100%
plt.legend(data.columns, fontsize=14)
plt.xlabel('Date', fontsize=14)
plt.ylabel('Price (normalized)', fontsize=14)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.title('One-Year Returns of Swiss Industrial Companies', fontsize=16, weight='bold');
# Calculate the log-returns
rets = np.log(data / data.shift(periods=-1))
# Plot daily returns in histogram
plt.hist(rets)
plt.legend(data.columns, loc='upper left');
n = 5000 # Number of simulations
pfrets = [] # Portfolio returns
pfvols = [] # Portfolio volatilities
rf = 0 # Risk-free rate
# Simulate n times 6 random weights and calculate corresponding
# (annual) portfolio return and volatility given the random weights
for p in range(n):
w = np.random.random(rets.shape[1])
w = w / np.sum(w)
pfrets.append(np.sum(rets.mean() * w) * 252 - rf)
pfvols.append(np.sqrt(np.dot(w.T, np.dot(rets.cov() * 252, w))))
# Convert results to an np.array
pfrets = np.array(pfrets)
pfvols = np.array(pfvols)
# Scatterplot of the 5000 simulated portfolios
plt.figure(figsize = (16, 8))
# Color (c) of dot based on Sharpe ratio,
# Color map (cmap) set to 'viridis' style
plt.scatter(pfvols, pfrets, c=((pfrets-rf) / pfvols),
marker='o', cmap='viridis')
# Add colorbar, labels and change tick size
plt.colorbar().set_label(label='Sharpe ratio', size=16)
plt.xlabel('Expected Volatility ($\sigma$)', fontsize=16)
plt.ylabel('Expected Return ($r_p$)', fontsize=16)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12);
# Import Seaborn package
import seaborn as sns
# Plot histogram with kde and normal fit given data
sns.distplot(pfrets, bins=50, norm_hist=True, fit=sps.norm,
hist_kws={'label':'frequency', 'alpha':0.8, 'edgecolor':'k'},
kde_kws={'label':'kde', 'color':'b'},
fit_kws={'label':'Normal pdf', 'alpha':0.5, 'color':'r'})
plt.legend(fontsize=16);
# Import worldbank functions as wb
from pandas_datareader import wb
# Set parameter and download data
ctry = ['US', 'GBR', 'CH']
ind = ['NY.GDP.MKTP.KD.ZG', 'GC.DOD.TOTL.GD.ZS']
df = wb.download(indicator=ind, country=ctry, start=1990, end=2018)
# Drop (multi)index and format columns
df.reset_index(inplace=True)
df.columns = ['Country', 'Year', 'GDP growth', 'Debt']
df.head()
# Draw scatterplot incl. linear regression
# ci=95 and n_boot=100 are default values, thus
# these are just shown for better understanding
fig = sns.lmplot(x='GDP growth', y='Debt', hue='Country', data=df,
truncate=True, height=10, ci=95, n_boot=100)
# Add more informative axis labels
fig.set_axis_labels('GDP Growth (in %)', 'Central Gvmt. Debt (total, in % of GDP)');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Matplotlib's Two Interfaces
Step2: In order to change the figure and axes you could use the plt.gcf() (get current figure) and plt.gca() (get current axes) routines.
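A minimal illustration (not part of the original notebook) of how these handles can be used to adjust the active figure and axes:
plt.plot(x, np.sin(x))
fig = plt.gcf()  # handle to the current figure
ax = plt.gca()   # handle to the current axes
ax.set_ylim(-1.5, 1.5)       # adjust the active axes via its handle
fig.set_size_inches(6, 3)    # adjust the active figure via its handle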
Step3: In the object-oriented style you work directly on the figure and axes objects. Sometimes, if you are asked to draw complicated figures, this approach might be advisable as it provides easier control over the figure and axes objects. But in general, these two interface styles provide the same functionality - the differences are of a technical nature. Choosing one approach over the other boils down to preference.
Step4: If we wish to add a second function to the plot, e.g. the cosine function, we can call the plot function multiple times.
Step5: Line Colors and Styles
Step6: Matplotlib will automatically cycle through a set of default colors if no color is specified. By defining a plot style such as plt.style.use('seaborn-whitegrid') the default colors are taken from the 'seaborn-whitegrid'-style file.
Step7: There are many more arguments to further adjust your plot to your liking. A toy example is given below, where the linewidth is manually set and circled markers are added (marker='o'). A limited overview of available markers is shown in the 'Scatter Plots' section below; the full list of available markers can be found here. For a further description of adjustable properties see the Matplotlib documentation.
Step8: Axes, Grids and Labels
Step9: Plot a Legend
Step10: TeX and LaTeX
Step11: Scatter Plots
Step12: The standard marker is a circle. The following figure shows other markers. The complete list can be found here.
Step13: Histogram
Step14: As shown in the above example, the figure can again be adjusted to the user's liking. Available arguments are listed here. alpha=0.5 adjusts the opacity of the bars. With density=True the counts will be normalized to form a probability density. These two arguments are helpful if one wishes to combine two histograms, as shown in the example below where the histogram displays a standard normal pdf combined with its cdf.
Step15: Below is an example one could use if one wishes to add a Kernel Density Estimation (KDE) to a histogram. Further information regarding the problem of bin-size when using histograms, and KDE estimation you can find here or here.
Step16: Bar Charts
Step17: There are other options such as the plotting functionalities provided through Plotly, Bokeh, or Altair, which make plotting bar charts considerably more accessible than Matplotlib does.
Step18: Subplots
Step19: The plt.subplots() function provides better control over elements and axes. Rather than creating subplot by subplot, plt.subplots() creates a full grid of subplots in a single line. Accessing them works similar to a NumPy array. Sharing x-axis and y-axis is easily done with the sharex= and sharey= command, respectively. Below is an example on how to create a 2x3 grid of subplots, where all plots in the same row share a common y-axis scale and all axes in the same column have their x-axis scale in common. We will use the object-oriented approach.
Step21: An emerging alternative is the plt.subplot_mosaic function. It offers a surprisingly simple way of arranging plots.
Step22: Saving Figures
Step23: Beyond Matplotlib
Step24: Scatter Plot
Step25: Histogram
Step26: Scatter Plot
Step27: Now that we have the data in a proper format, we use Seaborn's lmplot() function to plot debt against gdp growth. The lmplot() adds a linear regression to the data, incl. confidence bands (here set to 95%; based on 100 bootstrap resamples).
|
13,205
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
img = Image.open("img/colonies.jpg")
plt.imshow(img)
arr = np.array(img)
print("x,y,RGB ->",arr.shape)
arr = np.array(img)
plt.imshow(arr)
plt.show()
arr[:,:,1] = 255
plt.imshow(arr)
x = Image.fromarray(arr)
x.save('junk.png')
arr = np.array(img)
low_red = arr[:,:,0] < 50
arr[:,:,0] = low_red*255
arr[:,:,1] = low_red*255
arr[:,:,2] = low_red*255
plt.show()
plt.imshow(arr)
print("True and True =", bool(True*True))
print("False and True =", bool(False*True))
print("Flase and False =",bool(False*False))
print("True or False =",bool(True + False))
print("True or True =",bool(True + True))
print("False or False =",bool(False + False))
arr = np.array(img)
red = arr[:,:,0] < 2
green = arr[:,:,1] > 5
blue = arr[:,:,2] < 5
mask = red*green*blue
new_arr = np.zeros(arr.shape,dtype=arr.dtype)
new_arr[:,:,1] = mask*255
plt.imshow(new_arr)
img = Image.open("img/colonies.jpg")
arr = np.array(img)
mask = arr[:,:,0] > 20
arr[mask,0] = 0
arr[mask,1] = 0
arr[mask,2] = 0
plt.imshow(arr)
img2 = Image.fromarray(arr)
img2.save("junk.png")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Python Image Library (PIL)
Step2: You can load images as Image instances
Step3: Image instances can be interconverted with numpy arrays
Step4: Values for the different channels are represented as 8-bit integers (0-255)
Step5: Can convert arrays back to images and save
Step6: One powerful method is to create a mask
Step7: You can combine masks with "*" (and) and "+" (or)
Step8: Combined mask
Step9: Find pixels with some green but little red or blue, then set those to green. All other pixels are black.
|
13,206
|
<ASSISTANT_TASK:>
Python Code:
oxp = Symbol("Omega_x'")
b = Symbol("b")
n = Symbol("n")
theta = Symbol("theta")
w = Symbol("w")
s = Symbol("s")
a = Symbol("a")
subsampledOmega = (binomial(s, b) * binomial(n - s, a - b)) / binomial(n, a)
subsampledFpF = Sum(subsampledOmega, (b, theta, s))
subsampledOmegaSlow = (binomial(s, b) * binomial(n - s, a - b))
subsampledFpFSlow = Sum(subsampledOmegaSlow, (b, theta, s))/ binomial(n, a)
display(subsampledFpF)
display(subsampledFpFSlow)
display("n=1024, a=8, s=4, omega=2", subsampledFpF.subs(s, 4).subs(n, 1024).subs(a, 8).subs(theta, 2).evalf())
display("n=200000, a=2000, s=20, theta=10", subsampledFpF.subs(theta, 10).subs(s, 20).subs(n, 200000).subs(a, 2000).evalf())
display("n=200000, a=2000, s=40, theta=10", subsampledFpF.subs(theta, 15).subs(s, 120).subs(n, 200000).subs(a, 2000).evalf())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: where n refers to the size of the population of cells, a is the number of active cells at any instance in time, s is the number of actual synapses on a dendritic segment, and θ is the threshold for NMDA spikes. Following (Ahmad & Hawkins, 2015), the numerator counts the number of possible ways θ or more cells can match a fixed set of s synapses. The denominator counts the number of ways a cells out of n can be active.
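For reference, the false-match probability assembled in the code above (subsampledFpF) can be written as
$$P_{\mathrm{fp}} \;=\; \sum_{b=\theta}^{s} \frac{\binom{s}{b}\,\binom{n-s}{a-b}}{\binom{n}{a}}.$$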
|
13,207
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
from landlab import RasterModelGrid
grid = RasterModelGrid((200, 400), xy_spacing=(10e3, 20e3))
grid.dy, grid.dx
from landlab.components.flexure import Flexure
Flexure.input_var_names
Flexure.var_units("lithosphere__overlying_pressure_increment")
Flexure.var_help("lithosphere__overlying_pressure_increment")
Flexure.output_var_names
Flexure.var_help("lithosphere_surface__elevation_increment")
grid.add_zeros("lithosphere__overlying_pressure_increment", at="node")
flex = Flexure(grid, method="flexure", n_procs=4)
load = np.random.normal(0, 100 * 2650.0 * 9.81, grid.number_of_nodes)
grid.at_node["lithosphere__overlying_pressure_increment"] = load
grid.imshow(
"lithosphere__overlying_pressure_increment",
symmetric_cbar=True,
cmap="nipy_spectral",
)
flex.update()
grid.imshow(
"lithosphere_surface__elevation_increment",
symmetric_cbar=True,
cmap="nipy_spectral",
)
flex.eet *= 2.0
flex.update()
grid.imshow(
"lithosphere_surface__elevation_increment",
symmetric_cbar=True,
cmap="nipy_spectral",
)
load[np.where(np.logical_and(grid.node_x > 3000000, grid.node_x < 5000000))] = (
load[np.where(np.logical_and(grid.node_x > 3000000, grid.node_x < 5000000))] + 1e7
)
grid.imshow(
"lithosphere__overlying_pressure_increment",
symmetric_cbar=True,
cmap="nipy_spectral",
)
flex.update()
grid.imshow(
"lithosphere_surface__elevation_increment",
symmetric_cbar=True,
cmap="nipy_spectral",
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create the grid
Step2: Create a rectilinear grid with a spacing of 10 km between rows and 20 km between columns. The numbers of rows and columms are provided as a tuple of (n_rows, n_cols), in the same manner as similar numpy functions. The spacing is also a tuple, (dy, dx).
Step3: Create the component
Step4: The Flexure component, as with most landlab components, will require our grid to have some data that it will use. We can get the names of these data fields with the intput_var_names attribute of the component class.
Step5: We see that flexure uses just one data field
Step6: To print a more detailed description of a field, use var_help.
Step7: What about the data that Flexure provides? Use the output_var_names attribute.
Step8: Now that we understand the component a little more, create it using our grid.
Step9: Add some loading
Step10: Update the component to solve for deflection
Step11: As we saw above, the flexure component creates an output field (lithosphere_surface__elevation_increment) that contains surface deflections for the applied loads.
Step12: Maintain the same loading distribution but double the effective elastic thickness.
Step13: Now let's add a vertical rectangular load to the middle of the grid. We plot the load grid first to make sure we did this correctly.
|
13,208
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.read_csv('data/airline_delay_causes_2015.csv')
df.head()
df.columns = df.columns.str.strip()
df['month'] = df['month'].map(lambda x: '0' + str(x) if len(str(x)) < 2 else str(x))
df.month.unique()
agg_month_sum = df.groupby('month',as_index=False).sum()
not_ontime_flights = ['arr_cancelled','arr_diverted','arr_del15']
agg_month_sum['on_time_flights'] = agg_month_sum['arr_flights'] - agg_month_sum[not_ontime_flights].sum(axis=1)
delayed_columns = agg_month_sum.columns[agg_month_sum.columns.str.endswith('_delay')]
agg_month_sum[delayed_columns] = agg_month_sum[delayed_columns].applymap(lambda x: x/60)
agg_month_sum.to_csv('agg_month_sum_airlines_2015.csv_',index=False)
%matplotlib inline
df[df.month == 6].groupby('carrier_name').sum().T.plot()
df['delay_minutes_per_delayed_flight'] = (df.carrier_delay / df.carrier_ct)
date_df = df.groupby(['carrier_name','month'],as_index=False).delay_minutes_per_delayed_flight.mean()
date_df.to_csv('carr_delay_2015.csv_',index=False)
%matplotlib inline
df.groupby('carr_delay_2015.csv_')
1206011 / 19579
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Since dimple only accepts months in a two-digit format, convert the month column accordingly
Step2: We want the total number of operations and the total delay in minutes, so we aggregate per month.
|
13,209
|
<ASSISTANT_TASK:>
Python Code:
# Import modules
import numpy as np
import scipy
import sympy as sym
from scipy import sparse
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from IPython.display import Math
from IPython.display import display
sym.init_printing(use_latex=True)
def heatfd(xl, xr, yb, yt, M, N):
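# Explicit forward-difference method for the heat equation u_t = D*u_xx on [xl,xr] x [yb,yt]
# with Dirichlet boundary conditions; stable only when sigma = D*k/h^2 <= 1/2.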
f = lambda x : np.power(np.sin(2 * np.pi * x), 2)
l = lambda t : 0 * t
r = lambda t : 0 * t
D = 1
h = (xr - xl) / M
k = (yt - yb) / N
m = M - 1
n = N
sigma = D * k / np.power(h, 2)
A = np.diag(1 - 2 * sigma * np.ones(m)) + \
np.diag(sigma * np.ones(m - 1), 1) + \
np.diag(sigma * np.ones(m - 1), -1)
lside = l(yb + np.arange(n) * k)
rside = r(yb + np.arange(n) * k)
x = sym.Symbol('x')
expr = sym.sin(2 * sym.pi * x) ** 2
# expr = sym.diff(expr, x)
w = np.zeros(n * m).reshape(n, m).astype(np.float128)
for i in range(m):
w[0, i] = expr.subs(x, xl + (i + 1) * h).evalf()
for j in range(n - 1):
ww = np.zeros(m)
ww[0] = lside[j]
ww[-1] = rside[j]
v = np.matmul(A, w[j]) + sigma * ww
w[j + 1,:] = v
w = np.column_stack([lside, w, rside])
x = np.arange(0, m+2) * h
t = np.arange(0, n) * k
X, T = np.meshgrid(x, t)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X, T, w)
plt.show()
plt.clf()
heatfd(0, 1, 0, 1, 30, 2000)
def heatbd(xl, xr, yb, yt, M, N):
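# Implicit backward-difference method for the heat equation; unconditionally stable,
# each time step solves the linear system A*w_{j+1} = w_j + sigma*(boundary terms).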
f = lambda x : np.sin(2 * np.pi * x) ** 2
l = lambda t : 0 * t
r = lambda t : 0 * t
h = (xr - xl) / M
k = (yt - yb) / N
m = M - 1
n = N
D = 1 # diffusion coefficient
sigma = D * k / (h ** 2)
A = np.diag(1 + 2 * sigma * np.ones(m)) + \
np.diag(-sigma * np.ones(m - 1), 1) + \
np.diag(-sigma * np.ones(m - 1), -1)
lside = l(yb + np.arange(n) * k)
rside = r(yb + np.arange(n) * k)
''' Initial conditions '''
x = sym.Symbol('x')
expr = sym.sin(2 * sym.pi * x) ** 2
# expr = sym.diff(expr, x)
w = np.zeros(n * m).reshape(n, m).astype(np.float128)
for i in range(m):
w[0, i] = expr.subs(x, xl + (i + 1) * h).evalf()
for j in range(n - 1):
ww = np.zeros(m)
ww[0] = lside[j]
ww[-1] = rside[j]
v = np.matmul(np.linalg.inv(A), w[j,:] + sigma * ww)
w[j + 1,:] = v
w = np.column_stack([lside, w, rside])
x = np.arange(0, m+2) * h
t = np.arange(0, n) * k
X, T = np.meshgrid(x, t)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X, T, w)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
plt.clf()
heatbd(0, 1, 0, 1, 30, 20)
def heatbd(xl, xr, yb, yt, M, N):
l = lambda t : np.exp(t)
r = lambda t : np.exp(t - 0.5)
h = (xr - xl) / M
k = (yt - yb) / N
m = M - 1
n = N
D = 4 # diffusion coefficient
sigma = D * k / (h ** 2)
A = np.diag((1 + 2 * sigma) * np.ones(m)) + \
np.diag(-sigma * np.ones(m - 1), 1) + \
np.diag(-sigma * np.ones(m - 1), -1)
lside = l(yb + np.arange(n) * k)
rside = r(yb + np.arange(n) * k)
''' Initial conditions '''
x = sym.Symbol('x')
expr = sym.exp(-x / 2)
# expr = sym.diff(expr, x)
w = np.zeros(n * m).reshape(n, m).astype(np.float128)
for i in range(m):
w[0, i] = expr.subs(x, xl + (i + 1) * h).evalf()
for j in range(n - 1):
ww = np.zeros(m)
ww[0] = lside[j]
ww[-1] = rside[j]
v = np.matmul(np.linalg.inv(A), w[j,:] + sigma * ww)
w[j + 1,:] = v
w = np.column_stack([lside, w, rside])
x = np.arange(0, m+2) * h
t = np.arange(0, n) * k
X, T = np.meshgrid(x, t)
fig = plt.figure()
ax = fig.gca(projection='3d')
stride = 0
ax.plot_surface(X[stride:], T[stride:], w[stride:])
plt.xlabel('x')
plt.ylabel('y')
plt.show()
plt.clf()
heatbd(0, 1, 0, 1, 20, 100)
def heatbdn(xl, xr, yb, yt, M, N):
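# Backward-difference method for the heat equation with Neumann (zero-derivative) boundary
# conditions, imposed via second-order one-sided differences in the first and last rows of A.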
h = (xr - xl) / M
k = (yt - yb) / N
m = M - 1
n = N
D = 1 # diffusion coefficient
sigma = D * k / (h ** 2)
A = np.diag((1 + 2 * sigma) * np.ones(m)) + \
np.diag(-sigma * np.ones(m - 1), 1) + \
np.diag(-sigma * np.ones(m - 1), -1)
A[0,:3] = np.array([-3, 4, -1])
A[-1,-3:] = np.array([-1, 4, -3])
''' Initial conditions '''
x = sym.Symbol('x')
expr = sym.sin(2 * sym.pi * x) ** 2
# expr = sym.diff(expr, x)
w = np.zeros(n * m).reshape(n, m).astype(np.float128)
for i in range(m):
w[0, i] = expr.subs(x, xl + (i + 1) * h).evalf()
for j in range(n - 1):
b = w[j,:]
b[0] = 0
b[-1] = 0
w[j + 1,:] = np.matmul(np.linalg.inv(A), b)
x = np.arange(0, m) * h
t = np.arange(0, n) * k
X, T = np.meshgrid(x, t)
fig = plt.figure()
ax = fig.gca(projection='3d')
stride = 0
ax.plot_surface(X[stride:], T[stride:], w[stride:])
plt.xlabel('x')
plt.ylabel('y')
plt.show()
plt.clf()
heatbdn(0, 1, 0, 1, 20, 20)
def crank_nicolson_heat(xl, xr, yb, yt, M, N):
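# Crank-Nicolson method for the heat equation: the average of the explicit and implicit schemes,
# second-order accurate in time; each step solves A*w_{j+1} = B*w_j + sigma*s.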
l = lambda t : 0 * t
r = lambda t : 0 * t
D = 1
h = (xr - xl) / M
k = (yt - yb) / N
m = M - 1
n = N
sigma = D * k / (h ** 2)
A = np.diag((2 + 2 * sigma) * np.ones(m)) + \
np.diag(-sigma * np.ones(m - 1), 1) + \
np.diag(-sigma * np.ones(m - 1), -1)
B = np.diag((2 - 2 * sigma) * np.ones(m)) + \
np.diag(sigma * np.ones(m - 1), 1) + \
np.diag(sigma * np.ones(m - 1), -1)
lside = l(yb + np.arange(n) * k)
rside = r(yb + np.arange(n) * k)
''' Initial conditions '''
x = sym.Symbol('x')
expr = sym.sin(2 * sym.pi * x) ** 2
# expr = sym.diff(expr, x)
w = np.zeros(n * m).reshape(n, m).astype(np.float128)
for i in range(m):
w[0, i] = expr.subs(x, xl + (i + 1) * h).evalf()
for j in range(n - 1):
s = np.zeros(m)
s[0] = lside[j] + lside[j+1]
s[-1] = rside[j] + rside[j+1]
w[j + 1,:] = np.matmul(np.linalg.inv(A), np.matmul(B, w[j,:]) + sigma * s)
w = np.column_stack([lside, w, rside])
x = xl +np.arange(0, m+2) * h
t = yb + np.arange(0, n) * k
X, T = np.meshgrid(x, t)
fig = plt.figure()
ax = fig.gca(projection='3d')
stride = 0
ax.plot_surface(X[stride:], T[stride:], w[stride:])
plt.xlabel('x')
plt.ylabel('y')
plt.show()
plt.clf()
crank_nicolson_heat(0, 0.5, 0, 1, 30, 100)
def crank_nicolson_growth(xl, xr, yb, yt, M, N):
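# Crank-Nicolson method for a diffusion equation with a linear growth term, u_t = D*u_xx + C*u;
# the growth rate C enters the diagonals of A and B through the k*C contributions.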
l = lambda t : 0 * t
r = lambda t : 0 * t
D = 1
L = 1
C = 9.5
h = (xr - xl) / M
k = (yt - yb) / N
m = M - 1
n = N
sigma = D * k / h ** 2
A = np.diag((2 - k * C + 2 * sigma) * np.ones(m)) + \
np.diag(-sigma * np.ones(m - 1), 1) + \
np.diag(-sigma * np.ones(m - 1), -1)
B = np.diag((2 + k * C - 2 * sigma) * np.ones(m)) + \
np.diag(sigma * np.ones(m - 1), 1) + \
np.diag(sigma * np.ones(m - 1), -1)
lside = l(yb + np.arange(n) * k)
rside = r(yb + np.arange(n) * k)
''' Initial conditions '''
f = lambda x : np.power(np.sin(np.pi * x / L), 2)
w = np.zeros(n * m).reshape(n, m).astype(np.float128)
for i in range(m):
w[0, i] = f(xl + (i + 1) * h)
for j in range(n - 1):
s = np.zeros(m)
s[0] = lside[j] + lside[j+1]
s[-1] = rside[j] + rside[j+1]
w[j + 1,:] = np.matmul(np.linalg.inv(A), np.matmul(B, w[j,:]) + sigma * s)
w = np.column_stack([lside, w, rside])
x = xl + np.arange(0, m+2) * h
t = yb + np.arange(0, n) * k
X, T = np.meshgrid(x, t)
fig = plt.figure()
ax = fig.gca(projection='3d')
stride = 0
ax.plot_surface(X[stride:], T[stride:], w[stride:])
plt.xlabel('x')
plt.ylabel('t')
plt.show()
plt.clf()
crank_nicolson_growth(0, 1, 0, 1, 20, 20)
def wavefd(xl, xr, yb, yt, M, N):
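# Explicit finite-difference method for the wave equation u_tt = c^2*u_xx with Dirichlet boundaries;
# sigma = c*k/h is the CFL number and the first time step uses the initial velocity g(x).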
c = 1
h = (xr - xl) / M
k = (yt - yb) / N
m = M - 1
n = N
sigma = c * k / h
f = lambda x : np.sin(x * np.pi)
l = lambda x : 0 * x
r = lambda x : 0 * x
g = lambda x : 0 * x
lside = l(yb + np.arange(n) * k)
rside = r(yb + np.arange(n) * k)
A = np.diag((2 - 2 * sigma ** 2) * np.ones(m)) + \
np.diag((sigma ** 2) * np.ones(m - 1), 1) + \
np.diag((sigma ** 2) * np.ones(m - 1), -1)
'''Initial condition'''
w = np.zeros(n * m).reshape(n, m).astype(np.float128)
xv = np.linspace(0, 1, M + 1)[1:-1]
w[0, :] = f(xv)
w[1, :] = 0.5 * np.matmul(A, w[0, :]) + \
k * g(xv) + \
0.5 * np.power(sigma, 2) * np.array([lside[0], *np.zeros(m - 2), rside[0]])
for i in range(2, n - 1):
w[i,:] = np.matmul(A, w[i-1,:]) - w[i-2,:] + np.power(sigma, 2) * \
np.array([lside[i-1], *np.zeros(m - 2), rside[i-1]])
w = np.column_stack([lside, w, rside])
x = xl + np.arange(0, m + 2) * h
t = yb + np.arange(0, n) * k
X, T = np.meshgrid(x, t)
fig = plt.figure()
ax = fig.gca(projection='3d')
stride = 0
ax.plot_wireframe(X[stride:], T[stride:], w[stride:])
# ax.plot_surface(X[stride:], T[stride:], w[stride:])
ax.view_init(azim=60, elev=30)
plt.xlabel('x')
plt.ylabel('t')
plt.show()
plt.clf()
wavefd(0, 1, 0, 1, 20, 20)
def wavefd_cfl(xl, xr, yb, yt, M, N, C = 1):
c = C
h = (xr - xl) / M
k = (yt - yb) / N
if c * k > h:
raise ValueError("CFL condition 'c * k <= h' is not satisfied, c * k is %f and h is %f" %(c * k, h) )
m = M - 1
n = N
sigma = c * k / h
f = lambda x : np.sin(x * np.pi)
l = lambda x : 0 * x
r = lambda x : 0 * x
g = lambda x : 0 * x
lside = l(yb + np.arange(n) * k)
rside = r(yb + np.arange(n) * k)
A = np.diag((2 - 2 * sigma ** 2) * np.ones(m)) + \
np.diag((sigma ** 2) * np.ones(m - 1), 1) + \
np.diag((sigma ** 2) * np.ones(m - 1), -1)
'''Initial condition'''
w = np.zeros(n * m).reshape(n, m).astype(np.float128)
xv = np.linspace(0, 1, M + 1)[1:-1]
w[0, :] = f(xv)
w[1, :] = 0.5 * np.matmul(A, w[0, :]) + \
k * g(xv) + \
0.5 * np.power(sigma, 2) * np.array([lside[0], *np.zeros(m - 2), rside[0]])
for i in range(2, n - 1):
w[i,:] = np.matmul(A, w[i-1,:]) - w[i-2,:] + np.power(sigma, 2) * \
np.array([lside[i-1], *np.zeros(m - 2), rside[i-1]])
w = np.column_stack([lside, w, rside])
x = xl + np.arange(0, m + 2) * h
t = yb + np.arange(0, n) * k
X, T = np.meshgrid(x, t)
fig = plt.figure()
ax = fig.gca(projection='3d')
stride = 0
ax.plot_wireframe(X[stride:], T[stride:], w[stride:])
# ax.plot_surface(X[stride:], T[stride:], w[stride:])
ax.view_init(azim=20, elev=20)
plt.xlabel('x')
plt.ylabel('t')
plt.show()
plt.clf()
wavefd_cfl(0, 1, 0, 1, 20, 200, 6)
def poisson(xl, xr, yb, yt, M, N):
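# Finite-difference method for the Poisson/Laplace equation on a rectangle: one equation per grid
# point, centered differences at interior points and Dirichlet values g1..g4 on the four edges.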
f = lambda x, y : 0
g1 = lambda x : np.log(pow(x, 2) + 1)
g2 = lambda x : np.log(pow(x, 2) + 4)
g3 = lambda y : 2 * np.log(y)
g4 = lambda y : np.log(pow(y, 2) + 1)
m, n = M + 1, N + 1
mn = m * n
h, k = (xr - xl) / M, (yt - yb) / N
h2, k2 = pow(h, 2), pow(k, 2)
x = xl + np.arange(M + 1) * h
y = yb + np.arange(N + 1) * k
A = np.zeros((mn, mn))
b = np.zeros((mn, 1))
''' interior points '''
for i in range(2, m):
for j in range(2, n):
A[i+(j-1)*m - 1][i-1+(j-1)*m - 1] = 1 / h2
A[i+(j-1)*m - 1][i+1+(j-1)*m - 1] = 1 / h2
A[i+(j-1)*m - 1][i+(j-1)*m - 1] = - 2 / h2 - 2 / k2
A[i+(j-1)*m - 1][i+(j-2)*m - 1] = 1 / k2
A[i+(j-1)*m - 1][i+j*m - 1] = 1 / k2
b[i+(j-1)*m - 1] = f(x[i], y[j])
''' bottom and top boundary points '''
for i in range(1, m + 1):
j = 1
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g1(x[i - 1])
j = n
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g2(x[i - 1])
''' left and right boundary points '''
for j in range(2, n):
i = 1
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g3(y[j - 1])
i = m
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g4(y[j - 1])
v = np.matmul(np.linalg.inv(A), b)
w = v.reshape(n, m).T
X, Y = np.meshgrid(x, y)
fig = plt.figure()
ax =fig.gca(projection='3d')
ax.view_init(azim=225)
ax.plot_surface(X, Y, w)
plt.xlabel('x')
plt.ylabel('y')
plt.show(fig)
plt.close()
poisson(0, 1, 1, 2, 4, 4)
poisson(0, 1, 1, 2, 10, 10)
def poisson(xl, xr, yb, yt, M, N):
f = lambda x, y : 0
g1 = lambda x : np.sin(x * np.pi)
g2 = lambda x : np.sin(x * np.pi)
g3 = lambda y : 0
g4 = lambda y : 0
m, n = M + 1, N + 1
mn = m * n
h, k = (xr - xl) / M, (yt - yb) / N
h2, k2 = pow(h, 2), pow(k, 2)
x = xl + np.arange(M + 1) * h
y = yb + np.arange(N + 1) * k
A = np.zeros((mn, mn))
b = np.zeros((mn, 1))
''' interior points '''
for i in range(2, m):
for j in range(2, n):
A[i+(j-1)*m - 1][i-1+(j-1)*m - 1] = 1 / h2
A[i+(j-1)*m - 1][i+1+(j-1)*m - 1] = 1 / h2
A[i+(j-1)*m - 1][i+(j-1)*m - 1] = - 2 / h2 - 2 / k2
A[i+(j-1)*m - 1][i+(j-2)*m - 1] = 1 / k2
A[i+(j-1)*m - 1][i+j*m - 1] = 1 / k2
b[i+(j-1)*m - 1] = f(x[i], y[j])
''' bottom and top boundary points '''
for i in range(1, m + 1):
j = 1
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g1(x[i - 1])
j = n
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g2(x[i - 1])
''' left and right boundary points '''
for j in range(2, n):
i = 1
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g3(y[j - 1])
i = m
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g4(y[j - 1])
v = np.matmul(np.linalg.inv(A), b)
w = v.reshape(n, m).T
X, Y = np.meshgrid(x, y)
fig = plt.figure()
ax =fig.gca(projection='3d')
ax.view_init(azim=225)
ax.plot_surface(X, Y, w)
plt.xlabel('x')
plt.ylabel('y')
plt.show(fig)
plt.close()
poisson(0, 1, 0, 1, 10, 10)
def poissonfem(xl, xr, yb, yt, M, N):
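# Finite element method (piecewise-linear elements on a triangulated rectangle) for the elliptic
# problem Laplacian(u) + r(x,y)*u = f(x,y) with Dirichlet boundaries; B1..B6 are the barycenters
# of the six triangles surrounding each node, used to approximate the integrals of r and f.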
f = lambda x, y : 0
r = lambda x, y : 0
g1 = lambda x : np.log(pow(x, 2) + 1)
g2 = lambda x : np.log(pow(x, 2) + 4)
g3 = lambda y : 2 * np.log(y)
g4 = lambda y : np.log(pow(y, 2) + 1)
m, n = M + 1, N + 1
mn = m * n
h, k = (xr - xl) / M, (yt - yb) / N
hk = h * k
h2, k2 = pow(h, 2), pow(k, 2)
x = xl + np.arange(M + 1) * h
y = yb + np.arange(N + 1) * k
A = np.zeros((mn, mn))
b = np.zeros((mn, 1))
B1 = lambda i, j : (x[i] - 2 * h / 3, y[j] - k / 3)
B2 = lambda i, j : (x[i] - h / 3, y[j] - 2 * k / 3)
B3 = lambda i, j : (x[i] + h / 3, y[j] - k / 3)
B4 = lambda i, j : (x[i] + 2 * h / 3, y[j] + k / 3)
B5 = lambda i, j : (x[i] + h / 3, y[j] + 2 * k / 3)
B6 = lambda i, j : (x[i] - h / 3, y[j] + k / 3)
''' interior points '''
for i in range(2, m):
for j in range(2, n):
rsum = r(*B1(i,j)) + r(*B2(i,j)) + r(*B3(i,j)) + r(*B4(i,j)) + r(*B5(i,j)) + r(*B6(i,j))
fsum = f(*B1(i,j)) + f(*B2(i,j)) + f(*B3(i,j)) + f(*B4(i,j)) + f(*B5(i,j)) + f(*B6(i,j))
A[i+(j-1)*m - 1][i+(j-1)*m - 1] = 2 * (h2 + k2) / hk - hk * rsum / 18
A[i+(j-1)*m - 1][i-1+(j-1)*m - 1] = -k/h - hk * (r(*B1(i,j)) + r(*B6(i,j))) / 18
A[i+(j-1)*m - 1][i-1+(j-2)*m - 1] = -hk * (r(*B1(i,j)) + r(*B2(i,j))) / 18
A[i+(j-1)*m - 1][i+(j-2)*m - 1] = -h/k - hk * (r(*B2(i,j)) + r(*B3(i,j))) / 18
A[i+(j-1)*m - 1][i+1+(j-1)*m - 1] = -k/h - hk * (r(*B3(i,j)) + r(*B4(i,j))) / 18
A[i+(j-1)*m - 1][i+1+j*m - 1] = -hk * (r(*B4(i,j)) + r(*B5(i,j))) / 18
A[i+(j-1)*m - 1][i+j*m - 1] = - h / k - hk * (r(*B5(i,j)) + r(*B6(i,j))) / 18
b[i+(j-1)*m - 1] = - h * k * fsum / 6
''' bottom and top boundary points '''
for i in range(1, m + 1):
j = 1
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g1(x[i - 1])
j = n
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g2(x[i - 1])
''' left and right boundary points '''
for j in range(2, n):
i = 1
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g3(y[j - 1])
i = m
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g4(y[j - 1])
v = np.matmul(np.linalg.inv(A), b)
w = v.reshape(n, m).T
X, Y = np.meshgrid(x, y)
fig = plt.figure()
ax =fig.gca(projection='3d')
ax.view_init(azim=225)
ax.plot_surface(X, Y, w)
plt.xlabel('x')
plt.ylabel('y')
plt.show(fig)
plt.close()
poissonfem(0, 1, 1, 2, 4, 4)
poissonfem(0, 1, 1, 2, 10, 10)
def poissonfem(xl, xr, yb, yt, M, N):
f = lambda x, y : 2 * np.sin(2 * np.pi * y)
r = lambda x, y : 4 * pow(np.pi, 2)
g1 = lambda x : 0
g2 = lambda x : 0
g3 = lambda y : 0
g4 = lambda y : np.sin(2 * np.pi * y)
m, n = M + 1, N + 1
mn = m * n
h, k = (xr - xl) / M, (yt - yb) / N
hk = h * k
h2, k2 = pow(h, 2), pow(k, 2)
x = xl + np.arange(M + 1) * h
y = yb + np.arange(N + 1) * k
A = np.zeros((mn, mn))
b = np.zeros((mn, 1))
B1 = lambda i, j : (x[i] - 2 * h / 3, y[j] - k / 3)
B2 = lambda i, j : (x[i] - h / 3, y[j] - 2 * k / 3)
B3 = lambda i, j : (x[i] + h / 3, y[j] - k / 3)
B4 = lambda i, j : (x[i] + 2 * h / 3, y[j] + k / 3)
B5 = lambda i, j : (x[i] + h / 3, y[j] + 2 * k / 3)
B6 = lambda i, j : (x[i] - h / 3, y[j] + k / 3)
''' interior points '''
for i in range(2, m):
for j in range(2, n):
rsum = r(*B1(i,j)) + r(*B2(i,j)) + r(*B3(i,j)) + r(*B4(i,j)) + r(*B5(i,j)) + r(*B6(i,j))
fsum = f(*B1(i,j)) + f(*B2(i,j)) + f(*B3(i,j)) + f(*B4(i,j)) + f(*B5(i,j)) + f(*B6(i,j))
A[i+(j-1)*m - 1][i+(j-1)*m - 1] = 2 * (h2 + k2) / hk - hk * rsum / 18
A[i+(j-1)*m - 1][i-1+(j-1)*m - 1] = -k/h - hk * (r(*B1(i,j)) + r(*B6(i,j))) / 18
A[i+(j-1)*m - 1][i-1+(j-2)*m - 1] = -hk * (r(*B1(i,j)) + r(*B2(i,j))) / 18
A[i+(j-1)*m - 1][i+(j-2)*m - 1] = -h/k - hk * (r(*B2(i,j)) + r(*B3(i,j))) / 18
A[i+(j-1)*m - 1][i+1+(j-1)*m - 1] = -k/h - hk * (r(*B3(i,j)) + r(*B4(i,j))) / 18
A[i+(j-1)*m - 1][i+1+j*m - 1] = -hk * (r(*B4(i,j)) + r(*B5(i,j))) / 18
A[i+(j-1)*m - 1][i+j*m - 1] = - h / k - hk * (r(*B5(i,j)) + r(*B6(i,j))) / 18
b[i+(j-1)*m - 1] = - h * k * fsum / 6
''' bottom and top boundary points '''
for i in range(1, m + 1):
j = 1
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g1(x[i - 1])
j = n
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g2(x[i - 1])
''' left and right boundary points '''
for j in range(2, n):
i = 1
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g3(y[j - 1])
i = m
A[i+(j-1)*m - 1][i+(j-1)*m - 1]=1
b[i+(j-1)*m - 1] = g4(y[j - 1])
v = np.matmul(np.linalg.inv(A), b)
w = v.reshape(n, m).T
X, Y = np.meshgrid(x, y)
fig = plt.figure()
ax =fig.gca(projection='3d')
ax.view_init(azim=225)
ax.plot_surface(X, Y, w)
plt.xlabel('x')
plt.ylabel('y')
plt.show(fig)
plt.close()
poissonfem(0, 1, 0, 1, 16, 16)
def burgers(xl, xr, tb, te, M, N):
alpha = 5
beta = 4
D = 0.05
f = lambda x : 2 * D * beta * np.pi * np.sin(x * np.pi) / (alpha + beta * np.cos(np.pi * x))
l = lambda t : 0 * t
r = lambda t : 0 * t
h, k = (xr - xl) / M, (te - tb) / N
m, n = M + 1, N
sigma = D * k / (h * h)
w = np.zeros((M + 1) * (n + 1)).reshape(M + 1, n + 1)
w[:, 0] = f(xl + np.arange(M + 1) * h)
w1 = np.copy(w[:, 0])
for j in range(0, n):
for it in range(3):
DF1 = np.diag(1 + 2 * sigma * np.ones(m)) + np.diag(-sigma * np.ones(m-1), 1) \
+ np.diag(-sigma * np.ones(m-1), -1)
DF2 = np.diag([0,*(k * w1[2:m] / (2 * h)),0]) - np.diag([0,*(k * w1[0:m - 2] / (2 * h)),0]) \
+ np.diag([0,*(k * w1[1:m - 1] / (2 * h))], 1) - np.diag([*(k * w1[1:m - 1] / (2 * h)), 0], -1)
DF = DF1 + DF2;
F = -w[:,j] + np.matmul((DF1 + DF2 / 2), w1)
DF[0,:] = np.array([1, *np.zeros(m-1)])
DF[m-1,:] = np.array([*np.zeros(m-1), 1])  # also enforce the right-hand Dirichlet boundary row of the Jacobian
F[0] = w1[0] - l(j)
F[m-1] = w1[m-1] - r(j)
w1 -= np.matmul(np.linalg.inv(DF), F)
w[:, j + 1] = w1
# 3-D Plot
x = xl + np.arange(M + 1) * h
t = tb + np.arange(n + 1) * k
X, T = np.meshgrid(x, t)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.view_init(azim=225)
ax.plot_surface(X, T, w.T)
plt.xlabel('x')
plt.ylabel('t')
plt.show()
plt.close()
burgers(0, 1, 0, 2, 20, 40)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 8.1 Parabolic Equations
Step2: Backward Difference Method
Step3: Example
Step4: Example
Step5: Crank-Nicolson Method
Step6: Example
Step7: 8.2 Hyperbolic Equations
Step8: The CFL condition
Step9: 8.3 Elliptic Equations
Step10: Example
Step11: Finite Element Method for elliptic equations
Step12: Example
Step13: 8.4 Nonlinear partial differential equations
|
13,210
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
data = pd.read_csv("train.csv")
data.head()
print data.iloc[1][0]
plt.imshow(data.iloc[1][1:].reshape(28,28),cmap='Greys')
plt.show()
print data.iloc[28][0]
plt.imshow(data.iloc[28][1:].reshape(28,28),cmap='Greys')
plt.show()
np.unique(data['label'],return_counts=True)
def getTrainTest(digit):
digit_train = digit[0:int(len(digit)*.8)]
digit_train = digit_train[digit_train.columns[1:]].T
digit_test = digit[int(len(digit)*.8):]
digit_test = digit_test[digit_test.columns[1:]]
return (digit_train,digit_test)
zero = data[data['label']==0]
zero_train,zero_test = getTrainTest(zero)
one = data[data['label']==1]
one_train,one_test = getTrainTest(one)
two = data[data['label']==2]
two_train,two_test = getTrainTest(two)
three = data[data['label']==3]
three_train,three_test = getTrainTest(three)
four = data[data['label']==4]
four_train,four_test = getTrainTest(four)
zero_u,e,v = np.linalg.svd(zero_train,full_matrices=False)
one_u,e,v = np.linalg.svd(one_train,full_matrices=False)
two_u,e,v = np.linalg.svd(two_train,full_matrices=False)
three_u,e,v = np.linalg.svd(three_train,full_matrices=False)
four_u,e,v = np.linalg.svd(four_train,full_matrices=False)
#Regarding full_matrices = False
#If True, U and Vh are of shape (M,M), (N,N).
#If False, the shapes are (M,K) and (K,N), where K = min(M,N).
print zero_u.shape
print e.shape
print v.shape
for i in range(4):
plt.imshow(three_u[:,i].reshape(28,28),cmap='Greys')#first 5 columns of U
plt.show()
for i in range(4):
plt.imshow(zero_u[:,i].reshape(28,28),cmap='Greys')#first 5 columns of U
plt.show()
def classifyUnknownDigit(newDigit):
classes = [zero_u,one_u,two_u,three_u,four_u]
values = []
for U in classes:
values.append(np.linalg.norm((np.identity(len(U))-np.matrix(U)*np.matrix(U.T)).dot(newDigit),ord=2)/np.linalg.norm(newDigit,ord=2))
return values.index(min(values))
zero_pred = []
one_pred = []
two_pred = []
three_pred = []
four_pred = []
for i in range(len(four_test)):
four_pred.append(classifyUnknownDigit(four_test.iloc[i]))
for i in range(len(zero_test)):
zero_pred.append(classifyUnknownDigit(zero_test.iloc[i]))
for i in range(len(two_test)):
two_pred.append(classifyUnknownDigit(two_test.iloc[i]))
for i in range(len(one_test)):
one_pred.append(classifyUnknownDigit(one_test.iloc[i]))
for i in range(len(three_test)):
three_pred.append(classifyUnknownDigit(three_test.iloc[i]))
print "Accuracy"
print "------------"
print "0: ", zero_pred.count(0)/1.0/len(zero_pred) #count the number of 0's, divide by length of list to get accuracy.
print "1: ", one_pred.count(1)/1.0/len(one_pred)
print "2: ", two_pred.count(2)/1.0/len(two_pred)
print "3: ", three_pred.count(3)/1.0/len(three_pred)
print "4: ", four_pred.count(4)/1.0/len(four_pred)
np.unique(zero_pred,return_counts=True)
np.unique(four_pred,return_counts=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: So that's what the dataset looks like, each pixel is a column, each row is an image in the dataset. Actually I have a corresponding 'test' dataset but I'll just separate this one into train/test later on since it's big enough.
Step2: In the below bit of code, I made a function to separate the dataset into just 5 classes for demonstration purposes (only 0, 1, 2, 3, and 4). I also separated each of these digit datasets into a train and test dataset by assigning the first 80% of the dataset to train, and the rest for test. This is for each digit dataset.
Step3: Seeing the singular vectors reconstructed is pretty cool. Especially the '0' one. See below for '3' and '0'.
Step4: Below I have written a classification function and I just went through each of the test datasets to classify them as a digit. I used the U matrices of each of the digits (zero_u, one_u, etc.) as the 'classifier' for each class, and I iterated through them to get their resulting output. The minimum of the output is the class which is assigned by this function.
Step5: Woops forgot to print the 4 predictions. The above computation takes a few minutes and really beats up the CPU.
Step6: It's interesting to see how some digits are classified well like 1, 2, and 3, whereas the 0 and 4 are not as great. Still, 95% is pretty bad for real-world applications, especially for something as important as sending mail to the right people. The article talks about just letting humans classify the digits manually if it's not completely certain about a prediction (if there is no large distinction between values among the classifier matrices).
Step7: Similarly doing this for the four classifier, we see that it's pretty good but it's confusing 4's for 3's more than 1's,2's, and 0's. I guess the middle line of the '3' digit might be contributing to some confusion.
|
13,211
|
<ASSISTANT_TASK:>
Python Code:
import vpython as vp
#Code
def charge_color(charge):
if charge>0:
charge_color = vp.color.red
elif charge <0:
charge_color = vp.color.blue
else:
charge_color = vp.color.white
return charge_color
#
def getfield(position):
r = position
field = vp.vec(0,0,0)
for charge in charges:
qp = charge.q
rp = charge.pos
field = field + (r-rp)*kel*qp/vp.mag(r-rp)**3 # Electric Field
return field
def plot_field(position):
global field, magnitude
vector_field = vp.arrow(shaftwidth=6e-15)
field = getfield(position)
magnitude = vp.mag(field)
red = max(1-1e17/magnitude, 0)
blue = min(1e17/magnitude, 1)
if red >= blue:
blue = blue/red
red = 1.0
else:
red = red/blue
blue = 1.0
vector_field.pos = position
vector_field.axis = scale*field
vector_field.color = vp.vector(red,0,blue)
def mouse_to_field(vector_field):
position = vp.scene.mouse.pos
plot_field(position)
#
# position, field and magnitude are initialised here so the caption can be built
# before any field vector has been plotted; plot_field() updates field and magnitude.
position = vp.vec(0, 0, 0)
field = vp.vec(0, 0, 0)
magnitude = vp.mag(field)
p = (70*"="+"\n")
p += "Click or drag to plot a vector of the electric field produced.\n"
p += "On a touch screen: tap, or press and hold, then drag.\n"
p += "Arrows representing the field are bluer for low magnitudes and redder for high ones.\n"
# p += 70*"="+"\n"
p += ("rx = %2.20f m \n" %position.x)
p += ("ry = %2.20f m\n" %position.y)
p += ("rz = %2.20f m\n" %position.z)
p += ("r = %2.20f m \n" %vp.mag(position))
p += (70*"="+"\n")
p += ("Ex = %2.2f N/C\n" %field.x)
p += ("Ey = %2.2f N/C\n" %field.y)
p += ("Ez = %2.2f N/C\n" %field.z)
p += ("E = %2.2f N/C \n" %magnitude)
p += (70*"="+"\n")
vp.scene.caption = p
#For interactivity!
drag = False
vector_field = None
def down(ev):
global vector_field, drag
vector_field = vp.arrow(shaftwidth=6e-15)
field = mouse_to_field(vector_field)
drag = True
def move(ev):
global vector_field, drag
if not drag: return
mouse_to_field(vector_field)
def up(ev):
global vector_field, drag
mouse_to_field(vector_field)
drag = False
vp.scene.bind("mousedown", down)
vp.scene.bind("mousemove", move)
vp.scene.bind("mouseup", up)
##
scale = 4e-14/1e17
#scale = 0.002 #Scale for Electric field
##
fund_charge = 1.602e-19 #C: Fundamental charge
kel = 8.99e9 #Nm^2C^-2: Electric Constant
scale
s = "Electric field of a uniformly charged disk."
vp.scene.title = s
vp.scene.background = vp.color.white
#vp.scene.width = 800
#vp.scene.height = 500
vp.scene.range = 2e-13
# Draw the coordinate system that will be used
lenght_axis = 1e-13
shaftwidth = 6e-15
xaxis = vp.arrow(color=vp.color.black, pos=vp.vector(0,0,0), axis=lenght_axis*vp.vector(1,0,0), shaftwidth=shaftwidth)
xlabel = vp.label(pos=vp.vector(1.1*lenght_axis,0,0), text="x", color=vp.color.black, opacity=0, height=20, box=0 ,font='times')
yaxis = vp.arrow(color=vp.color.black, pos=vp.vector(0,0,0), axis=lenght_axis*vp.vector(0,1,0), shaftwidth=shaftwidth)
ylabel = vp.label(pos=vp.vector(0,1.1*lenght_axis,0), text="y", color=vp.color.black, opacity=0, height=20, box=0,font='times')
zaxis = vp.arrow(color=vp.color.black, pos=vp.vector(0,0,0), axis=lenght_axis*vp.vector(0,0,1), shaftwidth=shaftwidth)
zlabel = vp.label(pos=vp.vector(0,0,1.1*lenght_axis), text="z", color=vp.color.black, opacity=0, height=20, box=0,font='times')
#Parameters
Q = fund_charge # Total charge.
R = 1.0e-13 # Radius of disk.
N = 5000 # Number of point charges the disk will be made of.
A = vp.pi*R**2 # Area of the disk.
sigma = Q/A # Superficial density of charge
charge = Q/N # Single point charge.
a = A/N # Area occupied by a single point charge.
dx = vp.sqrt(a) # Distance between neighboring point charges.
print('A =', A, 'm^2')
print('sigma =', sigma, 'C/m^2')
print('q =', charge, 'C')
print('a =', a, 'm^2')
# Create a list of point charges.
charges = [] # An empty list.
x = 0
while (x <= R):
y = 0
ymax = vp.sqrt(R**2-x**2)
while (y <= ymax):
charges.append(vp.sphere(radius=0.1*dx, pos=vp.vector(x,y,0), q=charge, color=charge_color(charge)))
charges.append(vp.sphere(radius=0.1*dx, pos=vp.vector(-x,y,0), q=charge, color=charge_color(charge)))
charges.append(vp.sphere(radius=0.1*dx, pos=vp.vector(x,-y,0), q=charge, color=charge_color(charge)))
charges.append(vp.sphere(radius=0.1*dx, pos=vp.vector(-x,-y,0), q=charge, color=charge_color(charge)))
y = y + dx
x = x + dx
d = 30*dx
r1 = vp.vector(0.7*R,0,d)
r2 = vp.vector(-0.7*R,0,d)
r3 = vp.vector(0,0,d)
r4 = vp.vector(0.7*R,0,-d)
r5 = vp.vector(-0.7*R,0,-d)
r6 = vp.vector(0,0,-d)
plot_field(r1)
plot_field(r2)
plot_field(r3)
plot_field(r4)
plot_field(r5)
plot_field(r6)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up the scene and the coordinate system
Step2: Configure the source of the electric field
Step3: Render the source of the electric field
Step4: Evaluate the electric field at selected points
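For reference on Step 4: the field returned by getfield is the superposition of the point-charge contributions, with k the Coulomb constant kel and q_p, r_p the charge and position of each sphere in the charges list (a sketch of the standard formula):
$$\vec{E}(\vec{r}) = \sum_{p} \frac{k\, q_p\, (\vec{r} - \vec{r}_p)}{\lvert \vec{r} - \vec{r}_p \rvert^{3}}$$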
|
13,212
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
# a vector: the argument to the array function is a Python list
v = np.array([1,2,3,4])
v
# a matrix: the argument to the array function is a nested Python list
M = np.array([[1, 2], [3, 4]])
M
type(v), type(M)
v.shape
M.shape
M.size
np.shape(M)
np.size(M)
M.dtype
M[0,0] = "hello"
M = np.array([[1, 2], [3, 4]], dtype=complex)
M
# create a range
x = np.arange(0, 10, 1) # arguments: start, stop, step
x
x = np.arange(-1, 1, 0.1)
x
# using linspace, both end points ARE included
np.linspace(0, 10, 25)
np.logspace(0, 10, 10, base=np.e)
x, y = np.mgrid[0:5, 0:5] # similar to meshgrid in MATLAB
x
y
from numpy import random
# uniform random numbers in [0,1]
random.rand(5,5)
# standard normal distributed random numbers
random.randn(5,5)
# a diagonal matrix
np.diag([1,2,3])
# diagonal with offset from the main diagonal
np.diag([1,2,3], k=1)
np.zeros((3,3))
np.ones((3,3))
%%bash
echo '
1800 1 1 -6.1 -6.1 -6.1 1
1800 1 2 -15.4 -15.4 -15.4 1
1800 1 3 -15.0 -15.0 -15.0 1
1800 1 4 -19.3 -19.3 -19.3 1
1800 1 5 -16.8 -16.8 -16.8 1
1800 1 6 -11.4 -11.4 -11.4 1
1800 1 7 -7.6 -7.6 -7.6 1
1800 1 8 -7.1 -7.1 -7.1 1
1800 1 9 -10.1 -10.1 -10.1 1
1800 1 10 -9.5 -9.5 -9.5 1
' > test.dat
data = np.genfromtxt('test.dat')
data.shape
print data[:,0]
print data[0,:]
M = random.rand(3,3)
M
np.savetxt("random-matrix.csv", M)
!cat random-matrix.csv
np.savetxt("random-matrix.csv", M, fmt='%.5f') # fmt specifies the format
!cat random-matrix.csv
A = np.loadtxt("random-matrix.csv")
print A
np.save("random-matrix.npy", M)
!file random-matrix.npy
np.load("random-matrix.npy")
M.itemsize # bytes per element
M.nbytes # number of bytes
M.ndim # number of dimensions
# v is a vector, and has only one dimension, taking one index
v[0]
# M is a matrix, or a 2 dimensional array, taking two indices
M[1,1]
M
M[1]
M[1,:] # row 1
M[:,1] # column 1
M[0,0] = 1
M
# also works for rows and columns
M[1,:] = 0
M[:,2] = -1
M
A = np.array([1,2,3,4,5])
A
A[1:3]
A[1:3] = [-2,-3]
A
A[::] # lower, upper, step all take the default values
A[::2] # step is 2, lower and upper defaults to the beginning and end of the array
A[:3] # first three elements
A[3:] # elements from index 3
A = np.array([1,2,3,4,5])
A[-1] # the last element in the array
A[-3:] # the last three elements
A = np.array([[n+m*10 for n in range(5)] for m in range(5)])
A
# a block from the original array
A[1:4, 1:4]
# strides
A[::2, ::2]
row_indices = [1, 2, 3]
A[row_indices]
col_indices = [1, 2, -1] # remember, index -1 means the last element
A[row_indices, col_indices]
B = np.array([n for n in range(5)])
B
row_mask = np.array([True, False, True, False, False])
B[row_mask]
# same thing
row_mask = np.array([1,0,1,0,0], dtype=bool)
B[row_mask]
x = np.arange(0, 10, 0.5)
x
mask = (5 < x) * (x < 7.5)
mask
x[mask]
indices = np.where(mask)
indices
x[indices] # this indexing is equivalent to the fancy indexing x[mask]
np.diag(A)
np.diag(A, -1)
v2 = np.arange(-3,3)
v2
row_indices = [1, 3, 5]
v2[row_indices] # fancy indexing
v2.take(row_indices)
np.take([-3, -2, -1, 0, 1, 2], row_indices)
which = [1, 0, 1, 0]
choices = [[-2,-2,-2,-2], [5,5,5,5]]
np.choose(which, choices)
v1 = np.arange(0, 5)
v1 * 2
v1 + 2
print "A*2 ",A * 2
print "A+2 ",A + 2
A * A # element-wise multiplication
v1 * v1
A.shape, v1.shape
A * v1
np.dot(A, A)
np.dot(A, v1)
np.dot(v1, v1)
M = np.matrix(A)
v = np.matrix(v1).T # make it a column vector
v
M * M
M * v
# inner product
v.T * v
# with matrix objects, standard matrix algebra applies
v + M*v
v = np.matrix([1,2,3,4,5,6]).T
M.shape, v.shape
M * v
C = np.matrix([[1j, 2j], [3j, 4j]])
C
np.conjugate(C)
C.H
np.real(C) # same as: C.real
np.imag(C) # same as: C.imag
np.angle(C+1)
np.abs(C)
np.linalg.inv(C) # equivalent to C.I
C.I * C
np.linalg.det(C)
np.linalg.det(C.I)
np.shape(data)
np.mean(data[:,3])
np.std(data[:,3]), np.var(data[:,3])
data[:,3].min()
data[:,3].max()
d = np.arange(0, 10)
d
# sum up all elements
np.sum(d)
# product of all elements
np.prod(d+1)
# cummulative sum
np.cumsum(d)
# cummulative product
np.cumprod(d+1)
# same as: diag(A).sum()
np.trace(A)
a = np.array([0,2,33.3,np.NaN,2.])
print np.max(a)
print np.nanmax(a)
print np.argmax(d)
!head -n 3 stockholm_td_adj.dat
data = np.loadtxt('stockholm_td_adj.dat.bz2')
np.unique(data[:,1]) # the month column takes values from 1 to 12
mask_feb = data[:,1] == 2
# the temperature data is in column 3
np.mean(data[mask_feb,3])
months = np.arange(1,13)
monthly_mean = [np.mean(data[data[:,1] == month, 3]) for month in months]
import matplotlib.pyplot as plt
%matplotlib inline
fig, ax = plt.subplots()
ax.bar(months, monthly_mean)
ax.set_xlabel("Month")
ax.set_ylabel("Monthly avg. temp.");
m = random.rand(3,3)
m
# global max
m.max()
# max in each column
m.max(axis=0)
# max in each row
m.max(axis=1)
A
n, m = A.shape
B = A.reshape((1,n*m))
B
B[0,0:5] = 5 # modify the array
B
A # and the original variable is also changed. B is only a different view of the same data
B = A.flatten()
B
B[0:5] = 10
B
A # now A has not changed, because B's data is a copy of A's, not refering to the same data
array1 = np.arange(90).reshape((-1, 10))
array1.shape
array1[5,7]
array1[5,:]
array1[5,1:8]
array1[5:-1,1:8]
array1[(5, 7), (6, 8)]
v = np.array([1,2,3])
v.shape
# make a column matrix of the vector v
v[:, np.newaxis]
# column matrix
v[:,np.newaxis].shape
# row matrix
v[np.newaxis,:].shape
a = np.array([[1, 2], [3, 4]])
# repeat each element 3 times
np.repeat(a, 3)
# tile the matrix 3 times
np.tile(a, 3)
b = np.array([[5, 6]])
np.concatenate((a, b), axis=0)
np.concatenate((a, b.T), axis=1)
np.vstack((a,b))
np.hstack((a,b.T))
A = np.array([[1, 2], [3, 4]])
A
# now B is referring to the same array data as A
B = A
# changing B affects A
B[0,0] = 10
B
A
B = A.copy()
# now, if we modify B, A is not affected
B[0,0] = -5
B
A
v = np.array([1,2,3,4])
for element in v:
print(element)
M = np.array([[1,2], [3,4]])
for row in M:
print("row", row)
for element in row:
print(element)
for row_idx, row in enumerate(M):
print("row_idx", row_idx, "row", row)
for col_idx, element in enumerate(row):
print("col_idx", col_idx, "element", element)
# update the matrix M: square each element
M[row_idx, col_idx] = element ** 2
# each element in M is now squared
M
def Theta(x):
"""Scalar implementation of the Heaviside step function."""
if x >= 0:
return 1
else:
return 0
Theta(np.array([-3,-2,-1,0,1,2,3]))
Theta_vec = np.vectorize(Theta)
Theta_vec(np.array([-3,-2,-1,0,1,2,3]))
def Theta(x):
"""Vector-aware implementation of the Heaviside step function."""
return 1 * (x >= 0)
Theta(np.array([-3,-2,-1,0,1,2,3]))
# still works for scalars as well
Theta(-1.2), Theta(2.6)
def funct3(val):
if val > 3: # Doesn’t work with array
x = 2
else:
x = 5
return x
vfunct3 = np.vectorize(funct3)
vfunct3(np.array([3,4,5,1,2])) # Now works!
M
if (M > 5).any():
print("at least one element in M is larger than 5")
else:
print("no element in M is larger than 5")
if (M > 5).all():
print("all elements in M are larger than 5")
else:
print("all elements in M are not larger than 5")
M.dtype
M2 = M.astype(float)
M2
M2.dtype
M3 = M.astype(bool)
M3
%reload_ext version_information
%version_information numpy
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the numpy package the terminology used for vectors, matrices and higher-dimensional data sets is array.
Step2: The v and M objects are both of the type ndarray that the numpy module provides.
Step3: The difference between the v and M arrays is only their shapes. We can get information about the shape of an array by using the ndarray.shape property.
Step4: The number of elements in the array is available through the ndarray.size property
Step5: Equivalently, we could use the function numpy.shape and numpy.size
Step6: So far the numpy.ndarray looks awfully much like a Python list (or nested list). Why not simply use Python lists for computations instead of creating a new array type?
Step7: We get an error if we try to assign a value of the wrong type to an element in a numpy array
Step8: If we want, we can explicitly define the type of the array data when we create it, using the dtype keyword argument
Step9: Common data types that can be used with dtype are
Step10: linspace and logspace
Step11: mgrid
Step12: random data
Step13: diag
Step14: zeros and ones
Step15: File I/O
Step16: Using numpy.savetxt we can store a Numpy array to a file in CSV format
Step17: loadtxt can read directly from gzipped (.gz) and bzip2 (.bz2) files.
Step18: More properties of the numpy arrays
Step19: Manipulating arrays
Step20: If we omit an index of a multidimensional array it returns the whole row (or, in general, a N-1 dimensional array)
Step21: The same thing can be achieved by using : instead of an index
Step22: We can assign new values to elements in an array using indexing
Step23: Index slicing
Step24: Array slices are mutable
Step25: We can omit any of the three parameters in M[lower:upper:step]
Step26: Negative indices count from the end of the array (positive indices from the beginning)
Step27: Index slicing works exactly the same way for multidimensional arrays
Step28: Fancy indexing
Step29: We can also use index masks
Step30: This feature is very useful to conditionally select elements from an array, using for example comparison operators
Step31: Functions for extracting data from arrays and creating arrays
Step32: diag
Step33: take
Step34: But take also works on lists and other objects
Step35: choose
Step36: Linear algebra
Step37: Element-wise array-array operations
Step38: If we multiply arrays with compatible shapes, we get an element-wise multiplication of each row
Step39: Matrix algebra
Step40: Alternatively, we can cast the array objects to the type matrix. This changes the behavior of the standard arithmetic operators +, -, * to use matrix algebra.
Step41: If we try to add, subtract or multiply objects with incompatible shapes we get an error
Step42: See also the related functions
Step43: Hermitian conjugate
Step44: We can extract the real and imaginary parts of complex-valued arrays using real and imag
Step45: Or the complex argument and absolute value
Step46: Matrix computations
Step47: Determinant
Step48: Data processing
Step49: mean
Step50: standard deviations and variance
Step51: min and max
Step52: sum, prod, and trace
Step53: If an array has NaN elements, it is possible to use the same operations with nan in their name
Step54: Another important feature is finding the index of the maximum of an array.
Step55: Computations on subsets of arrays
Step56: The dataformat is
Step57: With these tools we have very powerful data processing capabilities at our disposal. For example, extracting the monthly average temperature for each month of the year only takes a few lines of code
Step58: Calculations with higher-dimensional data
Step59: Many other functions and methods in the array and matrix classes accept the same (optional) axis keyword argument.
Step60: We can also use the function flatten to make a higher-dimensional array into a vector. But this function creates a copy of the data.
Step61: Now, a few more examples with some graphics.
Step62: In this case, one of the axis lengths is left as -1. Numpy will compute this dimension for you.
Step63: The array has 10 columns and 9 rows.
Step64: Or we can select an entire row
Step65: Part of a row
Step66: Or a rectangle
Step67: We can even use non-sequential indexing
Step68: Adding a new dimension
Step69: Stacking and repeating arrays
Step70: concatenate
Step71: hstack and vstack
Step72: Copy and "deep copy"
Step73: If we want to avoid this behavior, so that when we get a new completely independent object B copied from A, then we need to do a so-called "deep copy" using the function copy
Step74: Iterating over array elements
Step75: When we need to iterate over each element of an array and modify its elements, it is convenient to use the enumerate function to obtain both the element and its index in the for loop
Step77: Vectorizing functions
Step78: OK, that didn't work because we didn't write the Theta function so that it can handle a vector input...
Step80: We can also implement the function to accept a vector input from the beginning (requires more effort but might give better performance)
Step81: Another example
Step82: This function does not work with arrays.
Step83: Using arrays in conditions
Step84: Type casting
Step85: Further reading
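A small follow-up to the reshape/flatten discussion around Step 60: np.may_share_memory can be used to check which operations return views of the original data and which return copies. This is an illustrative sketch, separate from the lecture code above:
import numpy as np
A = np.arange(12).reshape(3, 4)
B = A.reshape(1, 12)   # reshape returns a view: B shares A's buffer
C = A.flatten()        # flatten returns a copy: C has its own buffer
print(np.may_share_memory(A, B))  # True
print(np.may_share_memory(A, C))  # False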
|
13,213
|
<ASSISTANT_TASK:>
Python Code:
import pprint
import tweepy
# `db` is assumed to be an existing pymongo database handle created in an earlier cell.
cred = db.login.find_one({})
# Tweepy login credentials are stored in the MongoDB database
auth = tweepy.OAuthHandler(cred["consumerKey"], cred["consumerSecret"])
auth.set_access_token(cred["oauthTocken"], cred["oauthTokenSecret"])
api = tweepy.API(auth)
api
cities=["Delhi","Kolkata","Bangalore","Surat","India"]
loc = list(db.WOEID.find({"country" : "India",'name':{"$in":cities}},{'_id':0,'name':1,'woeid':1}))
pprint.pprint(loc[1])
Trend={}
for place in loc:
Trend[place["name"]] = api.trends_place(place["woeid"])
for key in Trend:
for trend in Trend[key][0]['trends']:
del trend['query']
del trend["url"]
for key in Trend:
db.placesT.insert_one(Trend[key][0])
print "Insertion Complete"
#Delete All Unused Variables
del key,Trend
#extract Data from databse
trend=list(db.placesT.find({}))
#colName=trend[0]['locations'][0]['name']
trendName=[li["name"] for li in trend[0]['trends']]
tweetVolume=[li["tweet_volume"] for li in trend[0]["trends"]]
tweetVolume
trend[0]['as_of']
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Where On Earth ID (WOEID)
Step2: Performing the latest trend query & storing the results
Step3: Cleaning our data and dropping unnecessary content
Step4: Inserting the data into the database
Step5: Analysing the data (a small sketch follows this list)
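A minimal sketch for the analysis step (Step 5): ranking the stored trends by tweet volume. The field names match the trend documents built above; counting a missing tweet_volume as 0 is an assumption added here.
top_trends = sorted(trend[0]['trends'],
                    key=lambda t: t['tweet_volume'] or 0,  # assumption: treat missing volumes as 0
                    reverse=True)
for t in top_trends[:10]:
    print("%s %s" % (t['name'], t['tweet_volume']))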
|
13,214
|
<ASSISTANT_TASK:>
Python Code:
from collections import defaultdict
from itertools import chain
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
from ipywidgets import interact
from chempy import Substance, Reaction, ReactionSystem
from chempy.kinetics.rates import Arrhenius, MassAction
from chempy.kinetics.ode import get_odesys
from chempy.printing.numbers import number_to_scientific_latex
from chempy.units import SI_base_registry, default_units as u
sp.init_printing()
%matplotlib inline
A, B, C = map(Substance, 'ABC')
r1 = Reaction({'A'}, {'B'}, MassAction(Arrhenius(unique_keys=('A1', 'Ea_R_1'))))
r2 = Reaction({'B'}, {'C'}, MassAction(Arrhenius(unique_keys=('A2', 'Ea_R_2'))))
rsys = ReactionSystem([r1, r2])
rsys
params = {'A1': 1e11/u.s, 'A2': 2e11/u.s, 'Ea_R_1': 8e3*u.K, 'Ea_R_2': 8.5e3*u.K, 'temperature': 300*u.K}
c0 = defaultdict(lambda: 0*u.molar, {'A': 1*u.molar})
variables = c0.copy()
variables.update(params)
rsys.rates(variables)
odesys, extra = get_odesys(rsys, include_params=False, lower_bounds=0)
print(dict(zip(odesys.dep, odesys.names)))
print(dict(zip(odesys.params, odesys.param_names)))
odesys.exprs
params2 = params.copy()
pk = 'Ea_R_2'
params2[pk] = [8.1e3, 8.2e3, 8.3e3]*u.K
res2 = odesys.integrate(7*u.s, c0, params2, integrator='cvode')
fig, axes = plt.subplots(1, len(res2), figsize=(14, 4))
for r, ax in zip(res2, axes):
r.plot(ax=ax)
ax.set_title('$%s = %s$' % (pk.replace('_', '\\_'), number_to_scientific_latex(r.named_param('Ea_R_2'))))
def integrate_and_plot(T_C=25):
res = odesys.integrate(7*u.s, c0, dict(params, temperature=(T_C+273.15)*u.K), integrator='cvode')
res.plot()
interact(integrate_and_plot)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will use a generic model representing a decay-chain with two decays
Step2: "Arrhenius" behaviour means that the rate of reaction depends exponentially on the inverse absolute temperature.
Step3: Let's look at the influence of Ea_R_2; we will choose three values for it
Step4: Running the integration & plotting the result
Step5: We can also use ipywidgets to get interactive controls
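For reference on Step 2: the Arrhenius law gives the temperature dependence of the rate constant, and the Ea_R_1/Ea_R_2 parameters above correspond to the activation energy divided by the gas constant (hence their kelvin units). A sketch of the standard formula, not output of the code:
$$k(T) = A\, e^{-E_a/(R\, T)}$$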
|
13,215
|
<ASSISTANT_TASK:>
Python Code:
def ptrans(f,t):
import numpy as np
g = np.empty_like(f)
if f.ndim == 1:
W = f.shape[0]
col = np.arange(W)
g = f[(col-t)%W]
elif f.ndim == 2:
H,W = f.shape
rr,cc = t
row,col = np.indices(f.shape)
g = f[(row-rr)%H, (col-cc)%W]
elif f.ndim == 3:
Z,H,W = f.shape
zz,rr,cc = t
z,row,col = np.indices(f.shape)
g = f[(z-zz)%Z, (row-rr)%H, (col-cc)%W]
return g
# implementation using periodic convolution
def ptrans2(f, t):
f, t = np.asarray(f), np.asarray(t).astype('int32')
h = np.zeros(2*np.abs(t) + 1)
t = t + np.abs(t)
h[tuple(t)] = 1
g = ia.pconv(f, h)
return g
def ptrans2d(f,t):
rr,cc = t
H,W = f.shape
r = rr%H
c = cc%W
g = np.empty_like(f)
g[:r,:c] = f[H-r:H,W-c:W]
g[:r,c:] = f[H-r:H,0:W-c]
g[r:,:c] = f[0:H-r,W-c:W]
g[r:,c:] = f[0:H-r,0:W-c]
return g
testing = (__name__ == '__main__')
if testing:
! jupyter nbconvert --to python ptrans.ipynb
import numpy as np
%matplotlib inline
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import sys,os
ia898path = os.path.abspath('../../')
if ia898path not in sys.path:
sys.path.append(ia898path)
import ia898.src as ia
if testing:
# 2D example
f = np.arange(15).reshape(3,5)
print("Original 2D image:\n",f,"\n\n")
print("Image translated by (0,0):\n",ia.ptrans(f, (0,0)).astype(int),"\n\n")
print("Image translated by (0,1):\n",ia.ptrans(f, (0,1)).astype(int),"\n\n")
print("Image translated by (-1,2):\n",ia.ptrans(f, (-1,2)).astype(int),"\n\n")
if testing:
# 3D example
f1 = np.arange(60).reshape(3,4,5)
print("Original 3D image:\n",f1,"\n\n")
print("Image translated by (0,0,0):\n",ia.ptrans(f1, (0,0,0)).astype(int),"\n\n")
print("Image translated by (0,1,0):\n",ia.ptrans(f1, (0,1,0)).astype(int),"\n\n")
print("Image translated by (-1,3,2):\n",ia.ptrans(f1, (-1,3,2)).astype(int),"\n\n")
if testing:
# 2D example
f = mpimg.imread('../data/cameraman.tif')
plt.imshow(f,cmap='gray'), plt.title('Original 2D image - Cameraman')
plt.imshow(ia.ptrans(f, np.array(f.shape)//3),cmap='gray'), plt.title('Cameraman periodically translated')
if testing:
print('testing ptrans')
f = np.array([[1,2,3,4,5],[6,7,8,9,10],[11,12,13,14,15]],'uint8')
print(repr(ia.ptrans(f, [-1,2]).astype(np.uint8)) == repr(np.array(
[[ 9, 10, 6, 7, 8],
[14, 15, 11, 12, 13],
[ 4, 5, 1, 2, 3]],'uint8')))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Examples
Step2: Example 1
Step3: Example 2
Step4: Equation
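The equation referred to in Step 4 is the periodic (circular) translation implemented by ptrans; for a 2-D image f of shape H x W translated by t = (r_t, c_t), in the same index convention as the code:
$$g(r, c) = f\big((r - r_t) \bmod H,\ (c - c_t) \bmod W\big)$$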
|
13,216
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from micromeritics import util
from micromeritics import isotherm_examples as ex
import matplotlib.pyplot as plt
carb = ex.carbon_black() # example isotherm of Carbon Black with N2 at 77K
sial = ex.silica_alumina() # example isotherm of Silica Alumina with N2 at 77K
mcm = ex.mcm_41() # example isotherm of MCM 41 with N2 at 77K
fig = plt.figure(figsize=(12,5))
axes = fig.add_subplot(111)
plt.title('Isotherm Plot')
plt.ylabel("Quantity Adsorbed (cm^3/g STP)")
plt.xlabel("Relative Pressure")
plt.gca().set_xscale('linear')
plt.plot( carb.Prel, carb.Qads, 'ro', label='Carbon Black with N2 at 77K' )
plt.plot( sial.Prel, sial.Qads, 'bo-', label='Silica Alumina with N2 at 77K')
plt.plot( mcm.Prel, mcm.Qads, 'go-', label='MCM 41 with N2 at 77K')
legend = axes.legend(loc='upper left', shadow=True)
plt.show()
fig = plt.figure(figsize=(12,5))
axes = fig.add_subplot(111)
plt.title('Isotherm Plot')
plt.ylabel("Quantity Adsorbed (cm^3/g STP)")
plt.xlabel("Relative Pressure")
plt.gca().set_xscale('log')
plt.plot( carb.Prel, carb.Qads, 'ro', label='Carbon Black with N2 at 77K' )
plt.plot( sial.Prel, sial.Qads, 'bo-', label='Silica Alumina with N2 at 77K')
plt.plot( mcm.Prel, mcm.Qads, 'go-', label='MCM 41 with N2 at 77K')
legend = axes.legend(loc='upper left', shadow=True)
plt.show()
zsm = ex.zsm_5() # example isotherm of ZSM-5 with Ar at 87K
fig = plt.figure(figsize=(12,5))
axes = fig.add_subplot(111)
plt.title('Isotherm Plot')
plt.ylabel("Quantity Adsorbed (cm^3/g STP)")
plt.xlabel("Absolute Pressure (mmHg)")
plt.gca().set_xscale('log')
plt.plot( zsm.Pabs, zsm.Qads, 'ro', label='ZSM-5 with Ar at 87K' )
legend = axes.legend(loc='upper left', shadow=True)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: It is also useful to show the isotherm with the Pressure axis scaled as logarithmic.
Step2: While it is more common to show isotherm data using relative pressure, it is also worthwhile to have the absolute pressures available. Below is an example data set for ZSM-5 analyzed with argon gas at 87 K, shown with absolute pressure as the dependent variable.
|
13,217
|
<ASSISTANT_TASK:>
Python Code::
import matplotlib.pyplot as plt
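# Note: k and l are assumed to be pre-existing sequences (x and y values) defined in an earlier cell.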
plt.plot(k,l)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
13,218
|
<ASSISTANT_TASK:>
Python Code:
# Limit processing of protocol parts for development
PROCESS_PARTS_LIMIT = 500
# Enable caching of protocol parts data (not efficient, should only be used for local development with sensible PROCESS_PARTS_LIMIT)
PROCESS_PARTS_CACHE = True
# Filter the meetings to be processed, these kwargs are passed along to DataFlows filter_rows processor for meetings resource
MEETINGS_FILTER_ROWS_KWARGS = {'equals': [{'KnessetNum': 20}]}
# Don'e use local data - loads everything from knesset data remote storage
# When set to False - also enables caching, so you won't download from remote storage on 2nd run.
USE_DATA = False
from dataflows import filter_rows, cache
from datapackage_pipelines_knesset.common_flow import load_knesset_data, load_member_names
# Loads a dict containing mapping between knesset member id and the member name
member_names = load_member_names(use_data=USE_DATA)
# define flow steps for loading the source committee meetings data
# the actual loading is done later in the Flow
load_steps = (
load_knesset_data('people/committees/meeting-attendees/datapackage.json', USE_DATA),
filter_rows(**MEETINGS_FILTER_ROWS_KWARGS)
)
if not USE_DATA:
# when loading from URL - enable caching which will skip loading on 2nd run
load_steps = (cache(*load_steps, cache_path='.cache/people-committee-meeting-attendees-knesset-20'),)
from collections import defaultdict
from dataflows import Flow
stats = defaultdict(int)
member_attended_meetings = defaultdict(int)
def process_meeting_protocol_part(row):
stats['processed parts'] += 1
if row['body'] and 'אנחנו ככנסת צריכים להיות ערוכים' in row['body']:
stats['meetings contain text: we as knesset need to be prepared'] += 1
def process_meeting(row):
stats['total meetings'] += 1
if row['attended_mk_individual_ids']:
for mk_id in row['attended_mk_individual_ids']:
member_attended_meetings[mk_id] += 1
parts_filename = row['parts_parsed_filename']
if parts_filename:
if PROCESS_PARTS_LIMIT and stats['processed parts'] < PROCESS_PARTS_LIMIT:
steps = (load_knesset_data('committees/meeting_protocols_parts/' + parts_filename, USE_DATA),)
if not USE_DATA and PROCESS_PARTS_CACHE:
steps = (cache(*steps, cache_path='.cache/committee-meeting-protocol-parts/' + parts_filename),)
steps += (process_meeting_protocol_part,)
Flow(*steps).process()
process_steps = (process_meeting,)
from dataflows import Flow, dump_to_path
Flow(*load_steps, *process_steps, dump_to_path('data/committee-meeting-attendees-parts')).process()
from collections import deque
import yaml
top_attended_member_names = [member_names[mk_id] for mk_id, num_attended in
deque(sorted(member_attended_meetings.items(), key=lambda kv: kv[1]), maxlen=5)]
print('\n')
print('-- top attended members --')
print(top_attended_member_names)
print('\n')
print('-- stats --')
print(yaml.dump(dict(stats), default_flow_style=False, allow_unicode=True))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load source data
Step2: Inspect the datapackages which will be loaded
Step3: Run the flow
Step4: Aggregate and print stats
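A small illustration of the top-5 extraction used in the stats step (Step 4): sorting ascending by value and keeping a deque with maxlen=5 leaves only the five largest counts. The toy dictionary below is made up for the example:
from collections import deque
counts = {'a': 3, 'b': 10, 'c': 7, 'd': 1, 'e': 5, 'f': 9}
top5 = deque(sorted(counts.items(), key=lambda kv: kv[1]), maxlen=5)
print([name for name, n in top5])  # ['a', 'e', 'c', 'f', 'b'] -- the five largest, still in ascending order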
|
13,219
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD-3-Clause
import mne
from mne import io
from mne.datasets import sample
from mne.cov import compute_covariance
print(__doc__)
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'
event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif'
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 40, n_jobs=1, fir_design='firwin')
raw.info['bads'] += ['MEG 2443'] # bads + 1 more
events = mne.read_events(event_fname)
# let's look at rare events, button presses
event_id, tmin, tmax = 2, -0.2, 0.5
reject = dict(mag=4e-12, grad=4000e-13, eeg=80e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=('meg', 'eeg'),
baseline=None, reject=reject, preload=True)
# Uncomment next line to use fewer samples and study regularization effects
# epochs = epochs[:20] # For your data, use as many samples as you can!
method_params = dict(diagonal_fixed=dict(mag=0.01, grad=0.01, eeg=0.01))
noise_covs = compute_covariance(epochs, tmin=None, tmax=0, method='auto',
return_estimators=True, verbose=True, n_jobs=1,
projs=None, rank=None,
method_params=method_params)
# With "return_estimator=True" all estimated covariances sorted
# by log-likelihood are returned.
print('Covariance estimates sorted from best to worst')
for c in noise_covs:
print("%s : %s" % (c['method'], c['loglik']))
evoked = epochs.average()
evoked.plot(time_unit='s') # plot evoked response
evoked.plot_white(noise_covs, time_unit='s')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Compute covariance using automated regularization
Step3: Show the evoked data
Step4: We can then show whitening for our various noise covariance estimates.
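Background for Step 4: whitening multiplies the evoked data by the inverse matrix square root of the chosen noise covariance, so with a well-estimated covariance the pre-stimulus (baseline) samples should look approximately like unit-variance white noise. A sketch of the transformation, not code from the example:
$$\tilde{x}(t) = C^{-1/2}\, x(t)$$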
|
13,220
|
<ASSISTANT_TASK:>
Python Code:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
# Config the matplotlib backend as plotting inline in IPython
%matplotlib inline
url = 'https://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
data_root = '.' # Change me to store data elsewhere
def download_progress_hook(count, blockSize, totalSize):
"""A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 5% change in download progress."""
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
dest_filename = os.path.join(data_root, filename)
if force or not os.path.exists(dest_filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(dest_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', dest_filename)
else:
raise Exception(
'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')
return dest_filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
num_classes = 10
np.random.seed(133)
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall(data_root)
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
import random
def display_samples(data_folders, sample_size):
"""Display sample_size images randomly picked from each folder of data_folders."""
for folder in data_folders:
print (folder)
image_files = os.listdir(folder)
selected_image = random.sample(image_files, sample_size)
for image in selected_image:
i = Image(os.path.join(folder, image))
display(i)
print ("Samples from Training Set")
display_samples(train_folders, 1)
print ("Samples from Test Set")
display_samples(test_folders, 1)
image_size = 28 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load_letter(folder, min_num_images):
"""Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
dtype=np.float32)
print(folder)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
image_data = (ndimage.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
dataset_names = []
for folder in data_folders:
set_filename = folder + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
# You may override by setting force=True.
print('%s already present - Skipping pickling.' % set_filename)
else:
print('Pickling %s.' % set_filename)
dataset = load_letter(folder, min_num_images_per_class)
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return dataset_names
train_datasets = maybe_pickle(train_folders, 45000)
test_datasets = maybe_pickle(test_folders, 1800)
%matplotlib inline
def disp_pickles_data(data_folders):
for folder in data_folders:
pickle_filename = ''.join(folder) + '.pickle'
try:
with open(pickle_filename, 'rb') as f:
dataset = pickle.load(f)
except Exception as exp:
print ('Unable to read dataset from ', pickle_filename, ': ', exp)
return
#display 8 samples from this pickle
plt.figure()
plt.suptitle('Sample images with label %s' % (folder[-1]))
for i, img in enumerate(random.sample(list(dataset), 8)):
plt.subplot(2, 4, i + 1)
plt.axis('off')
plt.imshow(img)
disp_pickles_data(train_folders)
def display_image_count_by_class(data_folders):
for folder in data_folders:
pickle_filename = folder + '.pickle'
try:
with open(pickle_filename, 'rb') as f:
dataset = pickle.load(f)
except Exception as exp:
print('Unable to load data from ', pickle_filename, ': ', exp)
return
print('# of images in ', pickle_filename, ' = ',len(dataset))
display_image_count_by_class(train_folders)
display_image_count_by_class(test_folders)
def make_arrays(nb_rows, img_size):
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
start_v, start_t = 0, 0
end_v, end_t = vsize_per_class, tsize_per_class
end_l = vsize_per_class+tsize_per_class
for label, pickle_file in enumerate(pickle_files):
try:
with open(pickle_file, 'rb') as f:
letter_set = pickle.load(f)
# let's shuffle the letters to have random validation and training set
np.random.shuffle(letter_set)
if valid_dataset is not None:
valid_letter = letter_set[:vsize_per_class, :, :]
valid_dataset[start_v:end_v, :, :] = valid_letter
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
train_letter = letter_set[vsize_per_class:end_l, :, :]
train_dataset[start_t:end_t, :, :] = train_letter
train_labels[start_t:end_t] = label
start_t += tsize_per_class
end_t += tsize_per_class
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
return valid_dataset, valid_labels, train_dataset, train_labels
train_size = 200000
valid_size = 10000
test_size = 10000
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
def randomize(dataset, labels):
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation,:,:]
shuffled_labels = labels[permutation]
return shuffled_dataset, shuffled_labels
train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
pretty_labels = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H', 8: 'I', 9: 'J'}
def disp_sample_dataset(dataset, labels):
items = random.sample(range(len(labels)), 8)
for i, item in enumerate(items):
plt.subplot(2, 4, i+1)
plt.axis('off')
plt.title(pretty_labels[labels[item]])
plt.imshow(dataset[item])
disp_sample_dataset(train_dataset, train_labels)
disp_sample_dataset(valid_dataset, valid_labels)
disp_sample_dataset(test_dataset, test_labels)
pickle_file = os.path.join(data_root, 'notMNIST.pickle')
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
regr = LogisticRegression()
X_test = test_dataset.reshape(test_dataset.shape[0], 28 * 28)
y_test = test_labels
sample_size = 50
X_train = train_dataset[:sample_size].reshape(sample_size, 784)
y_train = train_labels[:sample_size]
%time regr.fit(X_train, y_train)
regr.score(X_test, y_test)
pred_labels = regr.predict(X_test)
disp_sample_dataset(test_dataset, pred_labels)
sample_size = 100
X_train = train_dataset[:sample_size].reshape(sample_size, 784)
y_train = train_labels[:sample_size]
%time regr.fit(X_train, y_train)
regr.score(X_test, y_test)
sample_size = 1000
X_train = train_dataset[:sample_size].reshape(sample_size, 784)
y_train = train_labels[:sample_size]
%time regr.fit(X_train, y_train)
regr.score(X_test, y_test)
X_valid = valid_dataset[:sample_size].reshape(sample_size, 784)
y_valid = valid_labels[:sample_size]
regr.score(X_valid, y_valid)
pred_labels = regr.predict(X_valid)
disp_sample_dataset(valid_dataset, pred_labels)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k and the test set 19000 labeled examples. Given these sizes, it should be possible to train models quickly on any machine.
Step4: Extract the dataset from the compressed .tar.gz file.
Step6: Problem 1
Step8: Now let's load the data in a more manageable format. Since, depending on your computer setup you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk and curate them independently. Later we'll merge them into a single dataset of manageable size.
Step9: Problem 2
Step10: Problem 3
Step11: Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune train_size as needed. The labels will be stored into a separate array of integers 0 through 9.
Step12: Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.
Step13: Problem 4
Step14: Finally, let's save the data for later reuse
Step15: Problem 5
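A hedged sketch extending Problem 5: looping the same fit/score pattern over several training-set sizes to see how the off-the-shelf logistic regression scales with data. Variable names follow the cells above; the exact accuracies will differ from run to run.
for sample_size in (50, 100, 1000, 5000):
    X_train = train_dataset[:sample_size].reshape(sample_size, 784)
    y_train = train_labels[:sample_size]
    clf = LogisticRegression()
    clf.fit(X_train, y_train)
    print(sample_size, clf.score(X_test, y_test))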
|
13,221
|
<ASSISTANT_TASK:>
Python Code:
import geopandas as gpd
import matplotlib.pyplot as plt
import seaborn as sns
from cartoframes.auth import set_default_credentials
from cartoframes.data.observatory import *
from cartoframes.data.services import Isolines
from cartoframes.viz import *
sns.set_style('whitegrid')
%matplotlib inline
from cartoframes.auth import set_default_credentials
set_default_credentials('creds.json')
Catalog().subscriptions().datasets.to_dataframe()
pois_ds = Dataset.get('pb_points_of_i_94bda91b')
pois_ds.head()
sql_query = """
SELECT * except(do_label) FROM $dataset$
WHERE TRADE_DIVISION = 'DIVISION M. - SPORTS'
AND ST_IntersectsBox(geom, -3.716398,40.407437,-3.690477,40.425277)
"""
pois_df = pois_ds.to_dataframe(sql_query=sql_query)
# To keep only the latest version of POI's
pois_df = pois_df.sort_values(['NAME', 'do_date']).groupby('NAME').first().reset_index()
pois_df.head()
iso_service = Isolines()
isochrones_gdf, isochrones_metadata = iso_service.isochrones(pois_df, [300], mode='walk', geom_col='geom')
isochrones_gdf.head()
pois_df['isochrone'] = isochrones_gdf.sort_values('source_id')['the_geom'].values
pois_df.head()
Map([Layer(pois_df, geom_col='geom'),
Layer(pois_df, geom_col='isochrone', style=basic_style(opacity=0.1))])
enrichment = Enrichment()
# Here we can use any variable because we're only interested in counts
pois_df = enrichment.enrich_polygons(
pois_df,
variables=['CLASS_517d6003'],
aggregation='COUNT',
geom_col='isochrone'
)
# We rename the column name to give it a more descriptive name
pois_df.rename(columns={'CLASS_y':'n_pois'}, inplace=True)
pois_df.head()
Map(Layer(pois_df, geom_col='geom',
style=size_continuous_style('n_pois'),
legends=size_continuous_legend('# POIs'),
popup_hover=[popup_element('NAME', 'Name'),
popup_element('n_pois', 'Number of POIs')]))
pois_df = enrichment.enrich_polygons(
pois_df,
variables=['CLASS_517d6003'],
aggregation='COUNT',
geom_col='isochrone',  # the isochrone column created above
filters={Variable.get('CLASS_517d6003').id:"= 'EATING PLACES/RESTAURANTS'"}
)
# We rename the column name to give it a more descriptive name
pois_df.rename(columns={'CLASS':'n_pois_eating'}, inplace=True)
pois_df.head()
Map(Layer(pois_df, geom_col='geom',
style=size_continuous_style('n_pois_eating'),
legends=size_continuous_legend('# Eating POIs'),
popup_hover=[popup_element('NAME', 'Name'),
popup_element('n_pois_eating', 'Number of eating places')]))
Layout([Map(Layer(pois_df, geom_col='geom',
style=size_continuous_style('n_pois'),
legends=size_continuous_legend('# POIs'),
popup_hover=[popup_element('NAME', 'Name'),
popup_element('n_pois', 'Number of POIs')])),
Map(Layer(pois_df, geom_col='geom',
style=size_continuous_style('n_pois_eating'),
legends=size_continuous_legend('# Eating POIs'),
popup_hover=[popup_element('NAME', 'Name'),
popup_element('n_pois_eating', 'Number of eating places')]))],
map_height=550)
plt.figure(figsize=(12,5))
sns.regplot(pois_df['n_pois'], pois_df['n_pois_eating'],
scatter_kws={'color':'blue', 'alpha':0.5}, line_kws={'color':'red'})
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set CARTO default credentials
Step3: Note about credentials
Step4: <a id='section2'></a>
Step5: Visualize isochrones
Step6: <a id='section3'></a>
Step7: <a id='section31'></a>
Step8: Visualize enrichment
Step9: <a id='section32'></a>
Step10: Visualize enrichment
Step11: <a id='section33'></a>
|
13,222
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import datetime as dt
from QuantLib import *
plt.style.use('fivethirtyeight')
bid_rates = [0.044696, 0.0441, 0.0441, 0.0439, 0.0439, 0.04405, 0.0441, 0.043954, 0.044863]
mid_rates = [0.045128, 0.044413, 0.044288, 0.044, 0.044125, 0.044263, 0.04435, 0.04484, 0.045915]
offer_rates = [0.04556, 0.044725, 0.044475, 0.0441, 0.04435, 0.044475, 0.0446, 0.045725, 0.046967]
tenors = [Period('6m'), Period('9m'), Period('1Y'), Period('2Y'), Period('3Y'), Period('4Y'), Period('5Y'), Period('7y'), Period('10y')]
bid_quotes = [QuoteHandle(SimpleQuote(r)) for r in bid_rates]
mid_quotes = [QuoteHandle(SimpleQuote(r)) for r in mid_rates]
offer_quotes = [QuoteHandle(SimpleQuote(r)) for r in offer_rates]
shibor_index = Shibor(Period(3, Months))
bid_instruments = [ShiborSwapRateHelper(r, t, 3 * Months, shibor_index) for r, t in zip(bid_quotes, tenors)]
mid_instruments = [ShiborSwapRateHelper(r, t, 3 * Months, shibor_index) for r, t in zip(mid_quotes, tenors)]
offer_instruments = [ShiborSwapRateHelper(r, t, 3 * Months, shibor_index) for r, t in zip(offer_quotes, tenors)]
calendar = China(China.IB)
today = calendar.adjust(Date.todaysDate())
Settings.instance().evaluationDate = today
bid_curve = PiecewiseLinearZero(today, bid_instruments, Actual365Fixed())
mid_curve = PiecewiseLinearZero(today, mid_instruments, Actual365Fixed())
offer_curve = PiecewiseLinearZero(today, offer_instruments, Actual365Fixed())
%%time
pillars = [today + i for i in range(3660)]
bid_zero_rates = [bid_curve.zeroRate(d, Actual360(), Continuous, Annual, True).rate() * 100. for d in pillars]
mid_zero_rates = [mid_curve.zeroRate(d, Actual360(), Continuous, Annual, True).rate() * 100. for d in pillars]
offer_zero_rates = [offer_curve.zeroRate(d, Actual360(), Continuous, Annual, True).rate() * 100. for d in pillars]
py_dates = [dt.datetime(d.year(), d.month(), d.dayOfMonth()) for d in pillars]
plt.figure(figsize=(12, 6))
plt.title("Chinese Shibor/3M Swap Zero Rate curve", fontsize=16)
plt.plot(py_dates, bid_zero_rates)
plt.plot(py_dates, mid_zero_rates)
plt.plot(py_dates, offer_zero_rates)
plt.legend(['bid', 'mid', 'offer'])
%%time
bid_df = [bid_curve.discount(d, True) for d in pillars]
mid_df = [mid_curve.discount(d, True) for d in pillars]
offer_df = [offer_curve.discount(d, True) for d in pillars]
plt.figure(figsize=(12, 6))
plt.title("Chinese Shibor/3M Swap Discout Factor curve", fontsize=16)
plt.plot(py_dates, bid_df)
plt.plot(py_dates, mid_df)
plt.plot(py_dates, offer_df)
plt.legend(['bid', 'mid', 'offer'])
%%time
bid_forward = [bid_curve.forwardRate(d, d, Actual360(), Continuous, Annual, True).rate() * 100. for d in pillars]
mid_forward = [mid_curve.forwardRate(d, d, Actual360(), Continuous, Annual, True).rate() * 100. for d in pillars]
offer_forward = [offer_curve.forwardRate(d, d, Actual360(), Continuous, Annual, True).rate() * 100. for d in pillars]
plt.figure(figsize=(12, 6))
plt.title("Chinese Shibor/3M Swap Forward Rate curve", fontsize=16)
plt.plot(py_dates, bid_forward)
plt.plot(py_dates, mid_forward)
plt.plot(py_dates, offer_forward)
plt.legend(['bid', 'mid', 'offer'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Market data
Step2: Construct the yield curves
Step3: Retrieve data from the yield curves
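For a quick point check of Step3, a single value can be read directly off the curve objects built above (this sketch reuses the mid_curve, today and day-count conventions from the code; the 5-year date is chosen arbitrarily):
d5y = today + Period(5, Years)
print(mid_curve.zeroRate(d5y, Actual360(), Continuous, Annual, True).rate())
print(mid_curve.discount(d5y, True))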
|
13,223
|
<ASSISTANT_TASK:>
Python Code:
2+2
from gensim.models import Word2Vec
import gensim
model = gensim.models.KeyedVectors.load_word2vec_format(
'./private_data/GoogleNews-vectors-negative300.bin', binary=True)
# What does a word vector look like?
model.word_vec("cheeseburger")
cheeseburger = model.word_vec("cheeseburger")
hamburger = model.word_vec("hamburger")
corvette = model.word_vec("corvette")
# Numpy is a linear algebra library that we can use to help us efficiently find the dot product and
# vector magnitude (much like the distance formula or Pythagorean theorem)
import numpy as np
# How similar are the words "cheeseburger" and "hamburger"?
numerator = np.dot(cheeseburger,hamburger)
denominator = np.sqrt(np.sum(cheeseburger**2)) * np.sqrt(np.sum(hamburger**2))
print(numerator/denominator)
# How similar are the words "cheeseburger" and "corvette"?
numerator = np.dot(cheeseburger,corvette)
denominator = np.sqrt(np.sum(cheeseburger**2)) * np.sqrt(np.sum(corvette**2))
print(numerator/denominator)
vector_a = np.array([1,0])
vector_b = np.array([0,.5])
average_a_b = (vector_a+vector_b )/2
print(average_a_b)
import pandas as pd
tweets = pd.read_csv('./private_data/no_names.csv')
tweets.head()
from gensim.models import Word2Vec
import gensim
morton_model = gensim.models.Word2Vec.load('./private_data/morton_model')
# Just for fun. What has our model learned from Twitter?
morton_model.wv.most_similar(['donald'])
morton_model.wv.most_similar(['rt'])
morton_model.wv.most_similar(['crazy'])
from embeddings import embeddings
# Let's start by adding a 300-D word vector for every Tweet you guys sent out.
tweets1 = embeddings.append_word_vector_cols(tweets,morton_model,text_col='text')
tweets1.head()
embeddings.most_similar_one_class(tweets1,"1st")
embeddings.most_similar_one_class(tweets1,"2nd")
embeddings.most_similar_one_class(tweets1,"3rd")
embeddings.most_similar_one_class(tweets1,"4th")
embeddings.most_similar_one_class(tweets1,"6th")
embeddings.most_similar_one_class(tweets1,"7th")
tweets2 = embeddings.append_word_vector_cols(tweets, model, keyed_vec=True, text_col='text')
tweets2.head()
embeddings.most_similar_one_class(tweets2,"1st")
embeddings.most_similar_one_class(tweets2,"2nd")
embeddings.most_similar_one_class(tweets2,"3rd")
embeddings.most_similar_one_class(tweets2,"4th")
embeddings.most_similar_one_class(tweets2,"6th")
embeddings.most_similar_one_class(tweets2,"7th")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The notebook understands that I was asking for the execution of a command. OK, with that concept of what you are looking at out of the way, let's move on.
Step2: Comparing with Cosine Similarity
Step3: As we would hope, cheeseburgers and hamburgers are pretty similar, while cheeseburgers and corvettes are not.
Step4: I've written some code that will find the average word vector (or "embedding") for a particular Tweet with our 300-D vectors.
Step5: Our model does not have the vocabulary of Google News, but it has clearly learned a few interesting relationships.
Step6: First, Let's Look at Our Model Built From Twitter Data
Step7: Now Let's Look at the Model Trained with Google News Data
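Step2 computes cosine similarity by hand and Step4 refers to averaging word vectors per Tweet (done inside the embeddings helper, whose code is not shown here). A minimal, self-contained sketch of both operations, assuming the gensim 3.x KeyedVectors API used above:
import numpy as np
def cosine_similarity(a, b):
    # cosine of the angle between two vectors
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
def average_word_vector(tokens, vectors):
    # mean of the vectors of the tokens that are in the vocabulary; None if none are
    known = [vectors.word_vec(t) for t in tokens if t in vectors.vocab]
    return np.mean(known, axis=0) if known else None
print(cosine_similarity(model.word_vec("cheeseburger"), model.word_vec("hamburger")))
print(average_word_vector("i love a good cheeseburger".split(), model))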
|
13,224
|
<ASSISTANT_TASK:>
Python Code:
problem = lambda m, i: 0 < m < i
no_problem = lambda m, i: not problem(m, i) and not problem(3 - m, 3 - i)
def next_states(state):
m, i, b = state
if b == 1:
return { (m-mb, i-ib, 0) for mb in range(m+1)
for ib in range(i+1)
if 1 <= mb + ib <= 2 and no_problem(m-mb, i-ib)
}
else:
return { (m+mb, i+ib, 1) for mb in range(3-m+1)
for ib in range(3-i+1)
if 1 <= mb + ib <= 2 and no_problem(m+mb, i+ib)
}
start = (3, 3, 1)
goal = (0, 0, 0)
import graphviz as gv
def tripleToStr(t):
return '(' + str(t[0]) + ',' + str(t[1]) + ',' + str(t[2]) + ')'
def dot_graph(R):
'''This function takes a binary relation R as input and shows this relation as
a graph using the module graphviz.'''
dot = gv.Digraph()
dot.attr(rankdir='LR')
Nodes = { tripleToStr(a) for (a,b) in R } | { tripleToStr(b) for (a,b) in R }
for n in Nodes:
dot.node(n)
for (x, y) in R:
dot.edge(tripleToStr(x), tripleToStr(y))
return dot
def createRelation(start):
oldM = set()
M = { start }
R = set()
while True:
oldM = M.copy()
M |= { y for x in M
for y in next_states(x)
}
if M == oldM:
break
return { (x, y) for x in M
for y in next_states(x)
}
def printPath(Path):
print("Solution:\n")
for i in range(len(Path) - 1):
m1, k1, b1 = Path[i]
m2, k2, b2 = Path[i+1]
printState(m1, k1, b1)
printBoat(m1, k1, b1, m2, k2, b2)
m, k, b = Path[-1]
printState(m, k, b)
def printState(m, k, b):
print( fillCharsRight(m * "M", 6) +
fillCharsRight(k * "K", 6) +
fillCharsRight(b * "B", 3) + " |~~~~~| " +
fillCharsLeft((3 - m) * "M", 6) +
fillCharsLeft((3 - k) * "K", 6) +
fillCharsLeft((1 - b) * "B", 3)
)
def printBoat(m1, k1, b1, m2, k2, b2):
if b1 == 1:
if m1 < m2:
print("Error in printBoat: negative number of missionaries in the boat!")
return
if k1 < k2:
print("Error in printBoat: negative number of infidels in the boat!")
return
print(19*" " + "> " + fillCharsBoth((m1-m2)*"M" + " " + (k1-k2)*"K", 3) + " >")
else:
if m1 > m2:
print("Error in printBoat: negative number of missionaries in the boat!")
return
if k1 > k2:
print("Error in printBoat: negative number of infidels in the boat!")
return
print(19*" " + "< " + fillCharsBoth((m2-m1)*"M" + " " + (k2-k1)*"K", 3) + " <")
def fillCharsLeft(x, n):
s = str(x)
m = n - len(s)
return m * " " + s
def fillCharsRight(x, n):
s = str(x)
m = n - len(s)
return s + m * " "
def fillCharsBoth(x, n):
s = str(x)
ml = (n - len(s)) // 2
mr = (n + 1 - len(s)) // 2
return ml * " " + s + mr * " "
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: $\texttt{no_problem}(m, i)$ is true if there is no problem on either side.
Step2: A state is represented as a triple. The triple $(m, i, b)$ specifies that there are $m$ missionaries and $i$ infidels on the left shore, and that the boat is on the left shore if and only if $b = 1$ (a worked example follows after this list).
Step3: Initially, all missionaries, all infidels and the boat are on the left shore.
Step4: In order to compute a solution of this search problem, we have to %run the notebook Breadth-First-Search.ipynb.
Step6: The function dot_graph(R) turns a given binary relation R into a graph.
Step7: The function call createRelation(start) computes the transition relation. It assumes that all states are reachable from start.
Step8: The function call printPath(Path) prints the solution of the search problem.
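To make the state encoding from Step2 concrete, applying next_states to the start state should yield exactly the three safe first crossings:
print(next_states(start))
# expected (in some order): {(3, 2, 0), (3, 1, 0), (2, 2, 0)}
# i.e. one infidel, two infidels, or one missionary plus one infidel cross first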
|
13,225
|
<ASSISTANT_TASK:>
Python Code:
# imports needed by the cells below (missing from this dump; the sklearn/bayes_opt module paths are assumptions)
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from bayes_opt import BayesianOptimization
df = pd.read_csv('Iris.csv')
df.head()
x_data = df.iloc[:, :-1].values.astype(np.float32)
y_datalabel = df.iloc[:, -1]
y_data = LabelEncoder().fit_transform(df.iloc[:, -1])
onehot = np.zeros((y_data.shape[0], np.unique(y_data).shape[0]))
for i in range(y_data.shape[0]):
onehot[i, y_data[i]] = 1.0
x_train, x_test, y_train, y_test, y_train_label, y_test_label = train_test_split(x_data, onehot, y_data, test_size = 0.2)
def neural_network(num_hidden, size_layer, learning_rate, dropout_rate, beta, activation, batch_size = 16):
def activate(activation, first_layer, second_layer, bias):
if activation == 0:
activation = tf.nn.sigmoid
elif activation == 1:
activation = tf.nn.tanh
else:
activation = tf.nn.relu
layer = activation(tf.matmul(first_layer, second_layer) + bias)
return tf.nn.dropout(layer, dropout_rate)
tf.reset_default_graph()
X = tf.placeholder(tf.float32, (None, x_data.shape[1]))
Y = tf.placeholder(tf.float32, (None, onehot.shape[1]))
input_layer = tf.Variable(tf.random_normal([x_data.shape[1], size_layer]))
biased_layer = tf.Variable(tf.random_normal([size_layer], stddev = 0.1))
output_layer = tf.Variable(tf.random_normal([size_layer, onehot.shape[1]]))
biased_output = tf.Variable(tf.random_normal([onehot.shape[1]], stddev = 0.1))
layers, biased = [], []
for i in range(num_hidden - 1):
layers.append(tf.Variable(tf.random_normal([size_layer, size_layer])))
biased.append(tf.Variable(tf.random_normal([size_layer])))
first_l = activate(activation, X, input_layer, biased_layer)
next_l = activate(activation, first_l, layers[0], biased[0])
for i in range(1, num_hidden - 1):
next_l = activate(activation, next_l, layers[i], biased[i])
last_l = tf.matmul(next_l, output_layer) + biased_output
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = last_l, labels = Y))
regularizers = tf.nn.l2_loss(input_layer) + sum(map(lambda x: tf.nn.l2_loss(x), layers)) + tf.nn.l2_loss(output_layer)
cost = cost + beta * regularizers
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(last_l, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
COST, TEST_COST, ACC, TEST_ACC = [], [], [], []
for i in range(100):
train_acc, train_loss = 0, 0
for n in range(0, (x_train.shape[0] // batch_size) * batch_size, batch_size):
_, loss = sess.run([optimizer, cost], feed_dict = {X: x_train[n: n + batch_size, :], Y: y_train[n: n + batch_size, :]})
train_acc += sess.run(accuracy, feed_dict = {X: x_train[n: n + batch_size, :], Y: y_train[n: n + batch_size, :]})
train_loss += loss
TEST_COST.append(sess.run(cost, feed_dict = {X: x_test, Y: y_test}))
TEST_ACC.append(sess.run(accuracy, feed_dict = {X: x_test, Y: y_test}))
train_loss /= (x_train.shape[0] // batch_size)
train_acc /= (x_train.shape[0] // batch_size)
ACC.append(train_acc)
COST.append(train_loss)
COST = np.array(COST).mean()
TEST_COST = np.array(TEST_COST).mean()
ACC = np.array(ACC).mean()
TEST_ACC = np.array(TEST_ACC).mean()
return COST, TEST_COST, ACC, TEST_ACC
def generate_nn(num_hidden, size_layer, learning_rate, dropout_rate, beta, activation):
global accbest
param = {
'num_hidden' : int(np.around(num_hidden)),
'size_layer' : int(np.around(size_layer)),
'learning_rate' : max(min(learning_rate, 1), 0.0001),
'dropout_rate' : max(min(dropout_rate, 0.99), 0),
'beta' : max(min(beta, 0.5), 0.000001),
'activation': int(np.around(activation))
}
print("\nSearch parameters %s" % (param), file = log_file)
log_file.flush()
learning_cost, valid_cost, learning_acc, valid_acc = neural_network(**param)
print("stop after 200 iteration with train cost %f, valid cost %f, train acc %f, valid acc %f" % (learning_cost, valid_cost, learning_acc, valid_acc))
if (valid_acc > accbest):
accbest = valid_acc
return valid_acc
log_file = open('nn-bayesian.log', 'a')
accbest = 0.0
NN_BAYESIAN = BayesianOptimization(generate_nn,
{'num_hidden': (2, 20),
'size_layer': (32, 1024),
'learning_rate': (0.0001, 1),
'dropout_rate': (0.1, 0.99),
'beta': (0.000001, 0.49),
'activation': (0, 2)
})
NN_BAYESIAN.maximize(init_points = 30, n_iter = 50, acq = 'ei', xi = 0.0)
print('Maximum NN accuracy value: %f' % NN_BAYESIAN.res['max']['max_val'])
print('Best NN parameters: ', NN_BAYESIAN.res['max']['max_params'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ```text
Step2: text
|
13,226
|
<ASSISTANT_TASK:>
Python Code:
# imports used before they are (re)imported further down in this notebook
from os.path import join
from matplotlib import pyplot as plt
from pliers.tests.utils import get_test_data_path
from pliers.extractors import FaceRecognitionFaceLocationsExtractor
# A picture of Barack Obama
image = join(get_test_data_path(), 'image', 'obama.jpg')
# Initialize Extractor
ext = FaceRecognitionFaceLocationsExtractor()
# Apply Extractor to image
result = ext.transform(image)
result.to_df()
from pliers.extractors import FaceRecognitionFaceLocationsExtractor, merge_results
images = ['apple.jpg', 'obama.jpg', 'thai_people.jpg']
images = [join(get_test_data_path(), 'image', img) for img in images]
ext = FaceRecognitionFaceLocationsExtractor()
results = ext.transform(images)
df = merge_results(results)
df
from pliers.extractors import GoogleVisionAPIFaceExtractor
ext = GoogleVisionAPIFaceExtractor()
image = join(get_test_data_path(), 'image', 'obama.jpg')
result = ext.transform(image)
result.to_df(format='long', timing=False, object_id=False)
from pliers.stimuli import TextStim, ComplexTextStim
from pliers.extractors import VADERSentimentExtractor, merge_results
raw = """We're not claiming that VADER is a very good sentiment analysis tool.
Sentiment analysis is a really, really difficult problem. But just to make a
point, here are some clearly valenced words: disgusting, wonderful, poop,
sunshine, smile."""
# First example: we treat all text as part of a single token
text = TextStim(text=raw)
ext = VADERSentimentExtractor()
results = ext.transform(text)
results.to_df()
# Second example: we construct a ComplexTextStim, which will
# cause each word to be represented as a separate TextStim.
text = ComplexTextStim(text=raw)
ext = VADERSentimentExtractor()
results = ext.transform(text)
# Because results is a list of ExtractorResult objects
# (one per word), we need to merge the results explicitly.
df = merge_results(results, object_id=False)
df.head(10)
from pliers.extractors import ChromaSTFTExtractor
audio = join(get_test_data_path(), 'audio', 'barber.wav')
# Audio is sampled at 11KHz; let's compute power in 1 sec bins
ext = ChromaSTFTExtractor(hop_length=11025)
result = ext.transform(audio).to_df()
result.head(10)
# And a plot of the chromagram...
plt.imshow(result.iloc[:, 4:].values.T, aspect='auto')
audio = join(get_test_data_path(), 'audio', 'homer.wav')
ext = VADERSentimentExtractor()
result = ext.transform(audio)
df = merge_results(result, object_id=False)
df
from pliers.filters import FrameSamplingFilter
from pliers.extractors import ClarifaiAPIImageExtractor, merge_results
video = join(get_test_data_path(), 'video', 'small.mp4')
# Sample 2 frames per second
sampler = FrameSamplingFilter(hertz=2)
frames = sampler.transform(video)
ext = ClarifaiAPIImageExtractor()
results = ext.transform(frames)
df = merge_results(results)
df
from pliers.tests.utils import get_test_data_path
from os.path import join
from pliers.filters import FrameSamplingFilter
from pliers.converters import GoogleSpeechAPIConverter
from pliers.extractors import (ClarifaiAPIImageExtractor, GoogleVisionAPIFaceExtractor,
ComplexTextExtractor, PredefinedDictionaryExtractor,
STFTAudioExtractor, VADERSentimentExtractor,
merge_results)
video = join(get_test_data_path(), 'video', 'obama_speech.mp4')
# Store all the returned features in a single list (nested lists
# are fine, the merge_results function will flatten everything)
features = []
# Sample video frames and apply the image-based extractors
sampler = FrameSamplingFilter(every=10)
frames = sampler.transform(video)
obj_ext = ClarifaiAPIImageExtractor()
obj_features = obj_ext.transform(frames)
features.append(obj_features)
face_ext = GoogleVisionAPIFaceExtractor()
face_features = face_ext.transform(frames)
features.append(face_features)
# Power in speech frequencies
stft_ext = STFTAudioExtractor(freq_bins=[(100, 300)])
speech_features = stft_ext.transform(video)
features.append(speech_features)
# Explicitly transcribe the video--we could also skip this step
# and it would be done implicitly, but this way we can specify
# that we want to use the Google Cloud Speech API rather than
# the package default (IBM Watson)
text_conv = GoogleSpeechAPIConverter()
text = text_conv.transform(video)
# Text-based features
text_ext = ComplexTextExtractor()
text_features = text_ext.transform(text)
features.append(text_features)
dict_ext = PredefinedDictionaryExtractor(
variables=['affect/V.Mean.Sum', 'subtlexusfrequency/Lg10WF'])
norm_features = dict_ext.transform(text)
features.append(norm_features)
sent_ext = VADERSentimentExtractor()
sent_features = sent_ext.transform(text)
features.append(sent_features)
# Ask for data in 'long' format, and code extractor name as a separate
# column instead of prepending it to feature names.
df = merge_results(features, format='long', extractor_names='column')
# Output rows in a sensible order
df.sort_values(['extractor', 'feature', 'onset', 'duration', 'order']).head(10)
from pliers.tests.utils import get_test_data_path
from os.path import join
from pliers.graph import Graph
from pliers.filters import FrameSamplingFilter
from pliers.extractors import (PredefinedDictionaryExtractor, STFTAudioExtractor,
merge_results)
video = join(get_test_data_path(), 'video', 'obama_speech.mp4')
# Define nodes
nodes = [
(FrameSamplingFilter(every=10),
['ClarifaiAPIImageExtractor', 'GoogleVisionAPIFaceExtractor']),
(STFTAudioExtractor(freq_bins=[(100, 300)])),
('GoogleSpeechAPIConverter',
['ComplexTextExtractor',
PredefinedDictionaryExtractor(['affect/V.Mean.Sum',
'subtlexusfrequency/Lg10WF']),
'VADERSentimentExtractor'])
]
# Initialize and execute Graph
g = Graph(nodes)
# Arguments to merge_results can be passed in here
df = g.transform(video, format='long', extractor_names='column')
# Output rows in a sensible order
df.sort_values(['extractor', 'feature', 'onset', 'duration', 'order']).head(10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Face detection with multiple inputs
Step2: Note how the merged pandas DataFrame contains 5 rows, even though there were only 3 input images. The reason is that there are 5 detected faces across the inputs (0 in the first image, 1 in the second, and 4 in the third). You can discern the original sources from the stim_name and source_file columns.
Step4: Notice that the output in this case contains many more features. That's because the Google face recognition service gives us back a lot more information than just the location of the face within the image. Also, the example illustrates our ability to control the format of the output, by returning the data in "long" format, and suppressing output of columns that are uninformative in this context.
Step5: Analyze each word individually
Step6: Extract chromagram from an audio clip
Step7: Sentiment analysis on speech transcribed from audio
Step8: Object recognition on selectively sampled video frames
Step9: The resulting data frame has 41 columns (!), most of which are individual object labels like 'lego', 'toy', etc., selected for us by the Clarifai API on the basis of the content detected in the video (we could have also forced the API to return values for specific labels).
Step10: The resulting pandas DataFrame is quite large; even for our 9-second video, we get back over 3,000 rows! Importantly, though, the DataFrame contains all kinds of metadata that makes it easy to filter and sort the results in whatever way we might want to (e.g., we can filter on the extractor, stim class, onset or duration, etc.).
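Building on Step10, two slicing examples on the merged long-format DataFrame; the column names ('extractor', 'feature', 'onset') and the extractor strings are assumed to match what merge_results produces with extractor_names='column':
# all rows produced by the VADER sentiment extractor
vader_rows = df[df['extractor'] == 'VADERSentimentExtractor']
# object labels detected during the first two seconds of the video
early_objects = df[(df['extractor'] == 'ClarifaiAPIImageExtractor') & (df['onset'] < 2.0)]
early_objects.sort_values(['onset', 'feature']).head()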
|
13,227
|
<ASSISTANT_TASK:>
Python Code:
import os
import tensorflow as tf
import numpy as np
from google.cloud import bigquery
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# do not change these
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.8'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
def write_list_to_disk(my_list, filename):
with open(filename, 'w') as f:
for item in my_list:
line = "%s\n" % item
f.write(str(line.encode('utf8')))
sql="""
#standardSQL
SELECT
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS content_id
FROM `cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
GROUP BY
content_id
"""
content_ids_list = bigquery.Client().query(sql).to_dataframe()['content_id'].tolist()
write_list_to_disk(content_ids_list, "content_ids.txt")
print("Some sample content IDs {}".format(content_ids_list[:3]))
print("The total number of articles is {}".format(len(content_ids_list)))
sql="""
TODO: Modify the query above to instead create a list of all categories in the dataset.
You'll need to change the content_id to the appropriate field as well as the index.
"""
categories_list = #TODO: Modify the query above to create the list of categories
write_list_to_disk(categories_list, "categories.txt")
print(categories_list)
sql="""
#standardSQL
SELECT
REGEXP_EXTRACT((SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)), r"^[^,]+") AS first_author
FROM `cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND (SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
GROUP BY
first_author
"""
authors_list = bigquery.Client().query(sql).to_dataframe()['first_author'].tolist()
write_list_to_disk(authors_list, "authors.txt")
print("Some sample authors {}".format(authors_list[:10]))
print("The total number of authors is {}".format(len(authors_list)))
sql="""
WITH site_history as (
SELECT
fullVisitorId as visitor_id,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS content_id,
(SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) AS category,
(SELECT MAX(IF(index=6, value, NULL)) FROM UNNEST(hits.customDimensions)) AS title,
(SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)) AS author_list,
SPLIT(RPAD((SELECT MAX(IF(index=4, value, NULL)) FROM UNNEST(hits.customDimensions)), 7), '.') as year_month_array,
LEAD(hits.customDimensions, 1) OVER (PARTITION BY fullVisitorId ORDER BY hits.time ASC) as nextCustomDimensions
FROM
`cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND
fullVisitorId IS NOT NULL
AND
hits.time != 0
AND
hits.time IS NOT NULL
AND
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
)
SELECT
visitor_id,
content_id,
category,
REGEXP_REPLACE(title, r",", "") as title,
REGEXP_EXTRACT(author_list, r"^[^,]+") as author,
DATE_DIFF(DATE(CAST(year_month_array[OFFSET(0)] AS INT64), CAST(year_month_array[OFFSET(1)] AS INT64), 1), DATE(1970,1,1), MONTH) as months_since_epoch,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) as next_content_id
FROM
site_history
WHERE (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) IS NOT NULL AND
TODO: Use FARM_FINGERPRINT on the concatenated visitor_id and content_id to create a training set of approximately 90% of the data
"""
training_set_df = bigquery.Client().query(sql).to_dataframe()
training_set_df.to_csv('training_set.csv', header=False, index=False, encoding='utf-8')
training_set_df.head()
sql="""
WITH site_history as (
SELECT
fullVisitorId as visitor_id,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS content_id,
(SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) AS category,
(SELECT MAX(IF(index=6, value, NULL)) FROM UNNEST(hits.customDimensions)) AS title,
(SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)) AS author_list,
SPLIT(RPAD((SELECT MAX(IF(index=4, value, NULL)) FROM UNNEST(hits.customDimensions)), 7), '.') as year_month_array,
LEAD(hits.customDimensions, 1) OVER (PARTITION BY fullVisitorId ORDER BY hits.time ASC) as nextCustomDimensions
FROM
`cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND
fullVisitorId IS NOT NULL
AND
hits.time != 0
AND
hits.time IS NOT NULL
AND
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
)
SELECT
visitor_id,
content_id,
category,
REGEXP_REPLACE(title, r",", "") as title,
REGEXP_EXTRACT(author_list, r"^[^,]+") as author,
DATE_DIFF(DATE(CAST(year_month_array[OFFSET(0)] AS INT64), CAST(year_month_array[OFFSET(1)] AS INT64), 1), DATE(1970,1,1), MONTH) as months_since_epoch,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) as next_content_id
FROM
site_history
WHERE (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) IS NOT NULL AND
#TODO: Modify the FARM_FINGERPRINT you used in the previous cell to create a test set of approximately 10% of the data
"""
test_set_df = bigquery.Client().query(sql).to_dataframe()
test_set_df.to_csv('test_set.csv', header=False, index=False, encoding='utf-8')
test_set_df.head()
%%bash
wc -l *_set.csv
!head *_set.csv
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will use this helper function to write lists containing article ids, categories, and authors for each article in our database to a local file.
Step3: Pull data from BigQuery
Step5: In the following cells, you will create a local file which contains a list of article categories and a list of article authors.
Step7: When creating the author list, we'll only use the first author information for each article.
Step9: Create train and test sets.
Step11: Repeat the query as above, but change the outcome of the FARM_FINGERPRINT hash to collect the remaining 10% of the data for the test set.
Step12: Let's have a look at the two csv files we just created containing the training and test set. We'll also do a line count of both files to confirm that we have achieved an approximate 90/10 train/test split.
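The split from Step12 can also be sanity-checked directly on the dataframes created above (assuming both queries ran after the TODOs were filled in):
n_train, n_test = len(training_set_df), len(test_set_df)
print("train: {} rows, test: {} rows, test fraction: {:.3f}".format(n_train, n_test, n_test / float(n_train + n_test)))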
|
13,228
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import sys
sys.path.append("../../")
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import tabulate
import pprint
import click
import numpy as np
import pandas as pd
from ray.tune.commands import *
from dynamic_sparse.common.browser import *
exps = ['neurips_debug_test13', ]
paths = [os.path.expanduser("~/nta/results/{}".format(e)) for e in exps]
df = load_many(paths)
df.head(5)
# replace NaN values in the hebbian/weight prune percentages with 0
df['hebbian_prune_perc'] = df['hebbian_prune_perc'].replace(np.nan, 0.0, regex=True)
df['weight_prune_perc'] = df['weight_prune_perc'].replace(np.nan, 0.0, regex=True)
df.columns
df.shape
df.iloc[1]
df.groupby('model')['model'].count()
# Did any trials fail?
df[df["epochs"]<30]["epochs"].count()
# Removing failed or incomplete trials
df_origin = df.copy()
df = df_origin[df_origin["epochs"]>=30]
df.shape
# which ones failed?
# failed, or still ongoing?
df_origin['failed'] = df_origin["epochs"]<30
df_origin[df_origin['failed']]['epochs']
# helper functions
def mean_and_std(s):
return "{:.3f} ± {:.3f}".format(s.mean(), s.std())
def round_mean(s):
return "{:.0f}".format(round(s.mean()))
stats = ['min', 'max', 'mean', 'std']
def agg(columns, filter=None, round=3):
if filter is None:
return (df.groupby(columns)
.agg({'val_acc_max_epoch': round_mean,
'val_acc_max': stats,
'val_acc_last': stats,
'model': ['count']})).round(round)
else:
return (df[filter].groupby(columns)
.agg({'val_acc_max_epoch': round_mean,
'val_acc_max': stats,
'val_acc_last': stats,
'model': ['count']})).round(round)
agg(['hebbian_grow'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and check data
Step2: Analysis
Step3: What are optimal levels of hebbian and weight pruning
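Step3 can be explored with the agg helper defined above by grouping on the two pruning columns (both exist in df after the NaN replacement at the top of the notebook):
agg(['hebbian_prune_perc', 'weight_prune_perc'])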
|
13,229
|
<ASSISTANT_TASK:>
Python Code:
from robots.robots import Robot
from numpy import pi
params = [[ "l1", 0, 0, "q1"]]
robot1 = Robot("Pendulo simple", "R", [0.4], [0], params, "cinematico")
robot1.inicializar_puertos()
%matplotlib widget
robot1.visualizador()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The DH parameters of the manipulator to visualize are defined, it is given a name, the type of joints of the manipulator is specified, the link lengths and angles are given in two arrays, and the plotting mode is set (a hypothetical second example is sketched after this list).
Step2: The ports on which it has to listen for messages are initialized.
Step3: The robot's visualizer is called.
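As referenced in Step1, the same constructor signature presumably extends to more joints. A hypothetical two-link arm; the extra DH row, the joint string "RR" and the second length and angle are assumptions, not taken from the robots package documentation:
params2 = [["l1", 0, 0, "q1"],
           ["l2", 0, 0, "q2"]]
robot2 = Robot("Double pendulum", "RR", [0.4, 0.3], [0, 0], params2, "cinematico")
robot2.inicializar_puertos()
robot2.visualizador()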
|
13,230
|
<ASSISTANT_TASK:>
Python Code:
#Initialization of iPython, some helper functions.
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use('ggplot')
from sympy import *
from IPython.display import display, Math, Latex
init_printing(use_latex="mathjax")
_Omega=u'\u03A9'
def readCsvData(fileName):
return np.genfromtxt(fileName, delimiter=';')
def plotData(data, timebase=1, xlabel="", ylabel="",names=[]):
fig=plt.figure(figsize=[10,5])
l = np.size(data,0)
time = np.linspace(0,l-1,l)*timebase
if np.ndim(data)>1: # If more than one vector to plot
count = np.size(data,1)
for i in range(count):
plt.plot(time, data[:,i], label=names[i])
else:
plt.plot(time, data, label=names[0])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend()
plt.plot()
def plotCsv(fileName, timebase=1, xlabel="", ylabel="",names=[]):
data = readCsvData(fileName)
plotData(data, timebase, xlabel, ylabel,names)
# Tip rated power [W]
Ptip = 40
# Supply voltage [minimum, typical, maximum] [V]
Vsupply = [6, 12, 20]
Ptip_min=(Vsupply[0]**2)/(Vsupply[1]**2)*40
print(Ptip_min,"W")
Tambient = [0, 40] # [minimum, maximum] *C
Ttip = [200, 350] # [minimum, maximum] *C
# Nominal resistance of the tip heater
Rtip = Vsupply[1]**2/Ptip; print("Rtip = %1.2f %s" % (Rtip, _Omega))
# Nominal heater current at nominal voltage
Itip_nominal = Vsupply[1]/Rtip; print("Itip_nominal = %1.2f A" % Itip_nominal)
# Safety factor
k_safety = 1.1
# Minimum tip current - at minimum voltage, including safety factor
Itip_min = Vsupply[0]/Rtip/k_safety; print("Itip_min = %1.2f A" % Itip_min)
# Peak current - at maximum voltage, including safety factor
Itip_max = Vsupply[2]/Rtip*k_safety; print("Itip_max = %1.2f A" % Itip_max)
# Supply voltage for use as x axis in plots
Vs = np.array([Vsupply[0], Vsupply[2]])
# Minimum k_ilis valuses for supply voltage range
k_ilis_min = np.array([6400, 7900])
# Maximum k_ilis valuses for supply voltage range
k_ilis_max = np.array([16300, 12200])
# Minimum nominal current sense, mA
Is_min = Vs/Rtip/k_safety/k_ilis_max*1000
# Maximum nominal current sense, mA
Is_max = Vs/Rtip*k_safety/k_ilis_min*1000
# Plot graph
plt.rcParams['figure.figsize'] = 12, 5
plt.figure
plt.plot(Vs, Is_min,"k")
plt.plot(Vs, Is_max,"k")
plt.fill_between(Vs, Is_min, Is_max, facecolor='lime')
plt.ylim(0); plt.xlabel("Vsupply [V]"); plt.ylabel("Is [mA]"); plt.show()
# Sense resistor nominal value.
Rs = 470
# Sense resistor tolerance, including thermal coefficient.
Rs_tol = 0.06
# uC power supply voltage range, V
Vcc = 3.3; Vcc_min = 2.97; Vcc_max = 3.63
# Input voltage, min and max
Vs_min = Is_min * Rs / 1000 / (1+Rs_tol)
Vs_max = Is_max * Rs / 1000 * (1+Rs_tol)
# ADC readings, min and max
ADCs_min = Vs_min / Vcc_max * 4096
ADCs_max = Vs_max / Vcc_min * 4096
# Limits are chosen to provide some additional safety
ADC_lim_min = np.floor(ADCs_min * 0.85 - 5)
ADC_lim_max = np.ceil(ADCs_max * 1.15 + 15)
# print limits
print(ADC_lim_min, ADC_lim_max)
# Plot graph
plt.figure
plt.plot(Vs, ADCs_min,"k")
plt.plot(Vs, ADCs_max,"k")
plt.plot(Vs, ADC_lim_min, "r", linewidth=3)
plt.plot(Vs, ADC_lim_max, "r", linewidth=3)
plt.fill_between(Vs, ADCs_min, ADCs_max, facecolor='lime')
plt.text(12, 200, "Nominal")
plt.fill_between(Vs, 0, ADC_lim_min, facecolor='aqua')
plt.text(14, 50, "Open load")
plt.fill_between(Vs, ADC_lim_max, 700, facecolor='orange')
plt.text(10, 500, "Overload")
plt.ylim(0,700); plt.xlabel("Vsupply [V]"); plt.ylabel("ADC reading [LSB]"); plt.show()
# ADC readings for Vin_sense input, only nominal values
ADCvin = np.floor(Vs * 10 / (100 + 10) / Vcc * 4095)
display(ADCvin)
# Coefficient for integer multiply
int_coef = 65536
# Calculate coefficients for lim = a*Vin+b
a_min = int((ADC_lim_min[1] - ADC_lim_min[0])/(ADCvin[1] - ADCvin[0])*int_coef)
b_min = int(ADC_lim_min[1] - a_min/int_coef * ADCvin[1])
a_max = int((ADC_lim_max[1] - ADC_lim_max[0])/(ADCvin[1] - ADCvin[0])*int_coef)
b_max = int(ADC_lim_max[1] - a_max/int_coef * ADCvin[1])
# Display as en equations
display(Math(r"ADClim_{min}=\frac{%d}{%d}*ADC_{Vin}+%d" %(a_min, int_coef, b_min)))
display(Math(r"ADClim_{max}=\frac{%d}{%d}*ADC_{Vin}+%d" %(a_max, int_coef, b_max)))
# Check
display(Math(r"ADC\ limits\ assumed\begin{pmatrix} %d & %d \\ %d & %d \end{pmatrix}" %
(ADC_lim_max[0], ADC_lim_max[1], ADC_lim_min[0], ADC_lim_min[1]) +
r"calculated\begin{pmatrix} %d & %d \\ %d & %d \end{pmatrix}" %
(ADCvin[0] * a_max / int_coef + b_max, ADCvin[1] * a_max / int_coef + b_max,
ADCvin[0] * a_min / int_coef + b_min, ADCvin[1] * a_min / int_coef + b_min)))
display(Math(r'F(k) = \int_{-\infty}^{\infty} f(x) e^{2\pi i k} dx'))
x, y, z, a, b, c, m_real, m_appr, z_1 = symbols("x y z a b c m_real m_appr z_1")
m_real_ = Equality(m_real, sqrt(x**2 + y**2 + z**2)); display(m_real_)
m_appr_ = Equality(m_appr, a*abs(x) + b*abs(y) + c*abs(z)); display(m_appr_)
m_appr_ = m_appr_.subs(a,1).subs(b, 1/2).subs(c, 1/4); display(m_appr_)
Equality(z_1,z/y)
m_real_ = Equality(m_real, sqrt(1 + y**2 + (z_1*y)**2)); display(m_real_)
m_appr_ = m_appr_.subs(x,1).subs(z,z_1*y); display(m_appr_)
plt.rcParams['figure.figsize'] = 15, 10
plotting.plot3d(m_appr_.rhs/m_real_.rhs-1,(y,0,1),(z_1,0,1));plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic assumptions
Step2: Rated tip power (40 W) is defined for the typical supply voltage (12 V). For a lower supply voltage the maximum power is lower than nominal; at the minimum supply voltage it drops to $P_{tip,min} = (V_{min}/V_{typ})^2 \cdot P_{tip} = (6/12)^2 \cdot 40\,W = 10\,W$.
Step3: Device withstands power supply voltage in range of -30..+30 V but is non-operational outside Vsupply range.
Step4: Working range of the tip temperature is 200 to 350 °C, at an ambient temperature of 0 to 40 °C.
Step5: Power supply circuits
Step6: BTS5012SDA has built-in diagnosis and current sense circuits. It can be used to detect that proper heater is connected.
Step7: To calculate ADC readings from the current sense circuit, the sense resistor tolerance and the ADC errors must be taken into account.
Step8: Vsupply is measured by Vin_sense ADC input. Current sense thresholds related to Vin_sense readings are calculated below. Power supply sense resistors tolerances are neglected. At the end, limits are represented as $ADC_{lim}=a*ADC_{Vin}+b$.
Step9: Sensor amplifier
Step10: To minimize calculations, an approximation will be used: a variant of the alpha-max-plus-beta-min algorithm.
Step11: Simple values for a, b, c are selected
Step12: Let's calculate error of this formula.
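A quick numeric spot check of Step12 at the corner y = z_1 = 1 (i.e. x = y = z):
import math
m_exact = math.sqrt(1 + 1 + 1)   # true magnitude
m_apx = 1 + 0.5 + 0.25           # |x| + |y|/2 + |z|/4
print(m_apx / m_exact - 1)       # about +0.0104, roughly a 1% overestimate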
|
13,231
|
<ASSISTANT_TASK:>
Python Code:
# Author: Mathurin Massias <mathurin.massias@gmail.com>
# Yousra Bekhti <yousra.bekhti@gmail.com>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import os.path as op
import mne
from mne.datasets import somato
from mne.inverse_sparse import tf_mixed_norm, make_stc_from_dipoles
from mne.viz import plot_sparse_source_estimates
print(__doc__)
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
fwd_fname = op.join(data_path, 'derivatives', 'sub-{}'.format(subject),
'sub-{}_task-{}-fwd.fif'.format(subject, task))
condition = 'Unknown'
# Read evoked
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
reject = dict(grad=4000e-13, eog=350e-6)
picks = mne.pick_types(raw.info, meg=True, eog=True)
event_id, tmin, tmax = 1, -1., 3.
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=reject, preload=True)
evoked = epochs.filter(1, None).average()
evoked = evoked.pick_types(meg=True)
evoked.crop(tmin=0.008, tmax=0.2)
# Compute noise covariance matrix
cov = mne.compute_covariance(epochs, rank='info', tmax=0.)
# Handling forward solution
forward = mne.read_forward_solution(fwd_fname)
alpha, l1_ratio = 20, 0.05
loose, depth = 1, 0.95
# Use a multiscale time-frequency dictionary
wsize, tstep = [4, 16], [2, 4]
n_tfmxne_iter = 10
# Compute TF-MxNE inverse solution with dipole output
dipoles, residual = tf_mixed_norm(
evoked, forward, cov, alpha=alpha, l1_ratio=l1_ratio,
n_tfmxne_iter=n_tfmxne_iter, loose=loose,
depth=depth, tol=1e-3,
wsize=wsize, tstep=tstep, return_as_dipoles=True,
return_residual=True)
# Crop to remove edges
for dip in dipoles:
dip.crop(tmin=-0.05, tmax=0.3)
evoked.crop(tmin=-0.05, tmax=0.3)
residual.crop(tmin=-0.05, tmax=0.3)
stc = make_stc_from_dipoles(dipoles, forward['src'])
plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
opacity=0.1, fig_name="irTF-MxNE (cond %s)"
% condition)
ylim = dict(grad=[-300, 300])
evoked.pick_types(meg='grad')
evoked.plot(titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim,
proj=True)
residual.pick_types(meg='grad')
residual.plot(titles=dict(grad='Residuals: Gradiometers'), ylim=ylim,
proj=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load somatosensory MEG data
Step2: Run iterative reweighted multidict TF-MxNE solver
Step3: Generate stc from dipoles
Step4: Show the evoked response and the residual for gradiometers
|
13,232
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import igraph
import timeit
import itertools
# plausible implementations; the original function bodies are not shown in this dump
def enumerate_matrix(gmat, i):
    return np.nonzero(gmat[i, :])[1]
def enumerate_adj_list(adj_list, i):
    return adj_list[i]
def enumerate_edge_list(edge_list, i):
    return np.r_[edge_list[edge_list[:, 0] == i, 1], edge_list[edge_list[:, 1] == i, 0]]
def do_sim(n):
retlist = []
nrep = 10
nsubrep = 10
# this is (sort of) a Python way of doing the R function "replicate":
for _ in itertools.repeat(None, nrep):
# make a random undirected graph with fixed (average) vertex degree = 5
g = igraph.Graph.Barabasi(n, 5)
# get the graph in three different representations
g_matrix = np.matrix(g.get_adjacency().data)
g_adj_list = g.get_adjlist()
g_edge_list = np.array(g.get_edgelist())
start_time = timeit.default_timer()
for _ in itertools.repeat(None, nsubrep):
for i in range(0, n):
enumerate_matrix(g_matrix, i)
matrix_elapsed = timeit.default_timer() - start_time
start_time = timeit.default_timer()
for _ in itertools.repeat(None, nsubrep):
for i in range(0, n):
enumerate_adj_list(g_adj_list, i)
adjlist_elapsed = timeit.default_timer() - start_time
start_time = timeit.default_timer()
for _ in itertools.repeat(None, nsubrep):
for i in range(0, n):
enumerate_edge_list(g_edge_list, i)
edgelist_elapsed = timeit.default_timer() - start_time
retlist.append([matrix_elapsed, adjlist_elapsed, edgelist_elapsed])
# average over replicates and then
# divide by n so that the running time results are on a per-vertex basis
return np.mean(np.array(retlist), axis=0)/n
do_sim(1000)*1000
do_sim(2000)*1000
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, define a function that returns the index numbers of the neighbors of a vertex i, when the graph is stored as an adjacency matrix.
Step2: Define a function that enumerates the neighbors of a vertex i, when the graph is stored as an adjacency list.
Step3: Define a function that enumerates the neighbors of a vertex i, when the graph is stored as an edge list.
Step4: This next function is the simulation function. "n" is the number of vertices.
Step5: A simulation with 1000 vertices clearly shows that adjacency list is fastest
Step6: We see the expected behavior, with the running time for the adjacency-matrix and edge-list formats going up when we increase "n", but there is hardly any change in the running time for the graph stored in adjacency list format
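Given the reconstructed neighbor functions above (their bodies are missing from this dump and were filled in as plausible implementations), a quick consistency check on a small graph; all three representations should report the same neighbor set:
g_check = igraph.Graph.Barabasi(10, 3)
i = 0
print(sorted(enumerate_adj_list(g_check.get_adjlist(), i)))
print(sorted(enumerate_matrix(np.matrix(g_check.get_adjacency().data), i).tolist()))
print(sorted(enumerate_edge_list(np.array(g_check.get_edgelist()), i).tolist()))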
|
13,233
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries
import numpy as np
import pandas as pd
from time import time
from sklearn.metrics import f1_score
# Read student data
student_data = pd.read_csv("student-data.csv")
print "Student data read successfully!"
# TODO: Calculate number of students
n_students = len(student_data)
# TODO: Calculate number of features
n_features = len(student_data.columns) - 1 # The last field is the target and is not a feature
# TODO: Calculate passing students
n_passed = len([x for x in student_data["passed"] if x == "yes"])
# TODO: Calculate failing students
n_failed = n_students - n_passed
# TODO: Calculate graduation rate
grad_rate = 100.0 * n_passed / n_students
# Print the results
print "Total number of students: {}".format(n_students)
print "Number of features: {}".format(n_features)
print "Number of students who passed: {}".format(n_passed)
print "Number of students who failed: {}".format(n_failed)
print "Graduation rate of the class: {:.2f}%".format(grad_rate)
# Extract feature columns
feature_cols = list(student_data.columns[:-1])
# Extract target column 'passed'
target_col = student_data.columns[-1]
# Show the list of columns
print "Feature columns:\n{}".format(feature_cols)
print "\nTarget column: {}".format(target_col)
# Separate the data into feature data and target data (X_all and y_all, respectively)
X_all = student_data[feature_cols]
y_all = student_data[target_col]
# Show the feature information by printing the first five rows
print "\nFeature values:"
print X_all.head()
def preprocess_features(X):
''' Preprocesses the student data and converts non-numeric binary variables into
binary (0/1) variables. Converts categorical variables into dummy variables. '''
# Initialize new output DataFrame
output = pd.DataFrame(index = X.index)
# Investigate each feature column for the data
for col, col_data in X.iteritems():
# If data type is non-numeric, replace all yes/no values with 1/0
if col_data.dtype == object:
col_data = col_data.replace(['yes', 'no'], [1, 0])
# If data type is categorical, convert to dummy variables
if col_data.dtype == object:
# Example: 'school' => 'school_GP' and 'school_MS'
col_data = pd.get_dummies(col_data, prefix = col)
# Collect the revised columns
output = output.join(col_data)
return output
X_all = preprocess_features(X_all)
print "Processed feature columns ({} total features):\n{}".format(len(X_all.columns), list(X_all.columns))
# TODO: Import any additional functionality you may need here
from sklearn.cross_validation import train_test_split
# TODO: Set the number of training points
num_train = 300
# Set the number of testing points
num_test = X_all.shape[0] - num_train
random_state = 0
# TODO: Shuffle and split the dataset into the number of training and testing points above
X_train, X_test, y_train, y_test = train_test_split(
X_all,
y_all,
test_size=num_test,
train_size=num_train,
random_state=random_state
)
# Show the results of the split
print "Training set has {} samples.".format(X_train.shape[0])
print "Testing set has {} samples.".format(X_test.shape[0])
def train_classifier(clf, X_train, y_train):
''' Fits a classifier to the training data. '''
# Start the clock, train the classifier, then stop the clock
start = time()
clf.fit(X_train, y_train)
end = time()
# Print the results
print "Trained model in {:.4f} seconds".format(end - start)
def predict_labels(clf, features, target):
''' Makes predictions using a fit classifier based on F1 score. '''
# Start the clock, make predictions, then stop the clock
start = time()
y_pred = clf.predict(features)
end = time()
# Print and return results
print "Made predictions in {:.4f} seconds.".format(end - start)
return f1_score(target.values, y_pred, pos_label='yes')
def train_predict(clf, X_train, y_train, X_test, y_test):
''' Train and predict using a classifer based on F1 score. '''
# Indicate the classifier and the training set size
print "Training a {} using a training set size of {}. . .".format(clf.__class__.__name__, len(X_train))
# Train the classifier
train_classifier(clf, X_train, y_train)
# Print the results of prediction for both training and testing
print "F1 score for training set: {:.4f}.".format(predict_labels(clf, X_train, y_train))
print "F1 score for test set: {:.4f}.".format(predict_labels(clf, X_test, y_test))
# TODO: Import the three supervised learning models from sklearn
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import AdaBoostClassifier
# TODO: Initialize the three models
clf_A = GaussianNB()
clf_B = LogisticRegression(random_state=14)
clf_C = AdaBoostClassifier(random_state=14)
# TODO: Set up the training set sizes
X_train_100 = X_train[:100]
y_train_100 = y_train[:100]
X_train_200 = X_train[:200]
y_train_200 = y_train[:200]
X_train_300 = X_train[:300]
y_train_300 = y_train[:300]
# TODO: Execute the 'train_predict' function for each classifier and each training set size
# train_predict(clf, X_train, y_train, X_test, y_test)
for clf in [clf_A, clf_B, clf_C]:
for X_train_N, y_train_N in [(X_train_100, y_train_100), (X_train_200, y_train_200), (X_train_300, y_train_300)]:
train_predict(clf, X_train_N, y_train_N, X_test, y_test)
print("")
# TODO: Import 'GridSearchCV' and 'make_scorer'
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import make_scorer
# TODO: Create the parameters list you wish to tune
num_features = len(feature_cols)
parameters = {
"C": [0.5, 1.0, 1.5, 2.0]
}
# TODO: Initialize the classifier
clf = LogisticRegression(random_state=14)
# TODO: Make an f1 scoring function using 'make_scorer'
f1_scorer = make_scorer(f1_score, pos_label="yes")
# TODO: Perform grid search on the classifier using the f1_scorer as the scoring method
grid_obj = GridSearchCV(clf, param_grid=parameters, scoring = f1_scorer)
# TODO: Fit the grid search object to the training data and find the optimal parameters
grid_obj = grid_obj.fit(X_train, y_train)
# Get the estimator
clf = grid_obj.best_estimator_
# Report the final F1 score for training and testing after parameter tuning
print "Tuned model has a training F1 score of {:.4f}.".format(predict_labels(clf, X_train, y_train))
print "Tuned model has a testing F1 score of {:.4f}.".format(predict_labels(clf, X_test, y_test))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Implementation
Step2: Preparing the Data
Step3: Preprocess Feature Columns
Step4: Implementation
Step5: Training and Evaluating Models
Step6: Implementation
Step7: Tabular Results
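One way to assemble the tabular results of Step7 into a single DataFrame, reusing the classifiers, splits and predict_labels helper defined above (note that this refits each classifier):
rows = []
for clf in [clf_A, clf_B, clf_C]:
    for n in [100, 200, 300]:
        clf.fit(X_train[:n], y_train[:n])
        rows.append({'model': clf.__class__.__name__, 'train_size': n,
                     'f1_train': predict_labels(clf, X_train[:n], y_train[:n]),
                     'f1_test': predict_labels(clf, X_test, y_test)})
pd.DataFrame(rows)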
|
13,234
|
<ASSISTANT_TASK:>
Python Code:
import os
try:
os.mkdir('me')
except OSError:
pass
os.chdir('me')
%%bash
echo 'How does our directory look like?'
ls -al
git init
echo 'How does our directory look like now?'
ls -al
git status
# Let us create files for tracking.
echo 'My Project' > README
echo 'peisenha' > CONTRIBUTING.md
# Set up our identity
git config --global user.name "Philipp Eisenhauer"
git config --global user.email "eisenhauer@policy-lab.org"
%%bash
git add README
git status
%%bash
git add CONTRIBUTING.md
git commit -a -m'initial'
%%bash
echo 'Philipp Eisenhauer' > CONTRIBUTING.md
%%bash
git status
%%bash
git commit -a -m'changes to contributers'
%%bash
# Remove files
git rm README
git status
%%bash
# Move files
git mv CONTRIBUTING.md CONTRIBUTORS.md
git status
%%bash
git commit -m'reorganizing files'
%%bash
git log
%%bash
git remote add origin git@github.com:peisenha/try_git.git
git push -u origin master
%%bash
git clone git@github.com:peisenha/try_git.git ../other
%%bash
git clone git@github.com:softEcon/course.git /home/vagrant/course_material
%%bash
# Create tag
git tag -a v1.0 -m'ouf first tag annotated tag'
%%bash
# List tag
git tag -l
%%bash
# Push to remote repository
git commit -a -m'added first tag'
git push origin --tags
%%bash
git branch testing
%%bash
git branch -a
%%bash
git log --oneline --decorate
%%bash
git checkout testing
echo 'Some information on the project.' > README
git add README
git commit -a -m'adding project information'
%%bash
git checkout master
%%bash
git checkout testing
%%bash
git checkout master
echo 'Some other information on the project.' > README
git add README
git commit -a -m'adding other project information'
%%bash
git merge master testing
%%bash
git add README
git commit -m'fixed merge conflict'
%%bash
git branch -d testing
git branch -a
import shutil
os.chdir('../')
for dir_ in ['me', '.ipynb_checkpoints', 'other']:
try:
shutil.rmtree(dir_)
except OSError:
pass
import urllib; from IPython.core.display import HTML
HTML(urllib.urlopen('http://bit.ly/1K5apRH').read())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setting Up
Step2: Recording Changes to the Repository
Step3: Removing and Moving File
Step4: Viewing the Commit History
Step5: Adding a Remote Repository
Step6: While we are at it, let us check out the class repository.
Step7: From now on, you will find all class material in this repository.
Step8: Let us take a look at our repository in a repository viewer. We choose gitg and can install it using the Ubuntu Software Center.
Step9: Switching between Branches
Step10: Let us check how the content of the directory changes as we move between the different branches.
Step11: Basic Merging
Step12: Let us clean up our branches by deleting testing and keep our repository in good standing.
Step13: Summary
Step14: Miscellaneous
|
13,235
|
<ASSISTANT_TASK:>
Python Code:
# import the packages that will be used
import pandas as pd
import pandas_datareader.data as web
import numpy as np
import datetime
from datetime import datetime
import scipy.stats as stats
import scipy as sp
import scipy.optimize as scopt
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# some options for Python
pd.set_option('display.notebook_repr_html', True)
pd.set_option('display.max_columns', 6)
pd.set_option('display.max_rows', 10)
pd.set_option('display.width', 78)
pd.set_option('precision', 3)
def def_portafolio(tickers, participacion=None):
if (participacion is None):
part = np.ones(len(tickers))/len(tickers)
portfolio = pd.DataFrame({'Tickers': tickers, 'Participacion': participacion}, index=tickers)
return portfolio
portafolio = def_portafolio(['Acción A', 'Acción B'], [1, 1])
portafolio
rendimientos = pd.DataFrame({'Acción A': [0.1, 0.24, 0.05, -0.02, 0.2],
'Acción B': [-0.15, -0.2, -0.01, 0.04, -0.15]})
rendimientos
def valor_portafolio_ponderado(portafolio, rendimientos, name='Valor'):
total_participacion = portafolio.Participacion.sum()
ponderaciones=portafolio.Participacion/total_participacion
rendimientos_ponderados = rendimientos*ponderaciones
return pd.DataFrame({name: rendimientos_ponderados.sum(axis=1)})
rend_portafolio=valor_portafolio_ponderado(portafolio, rendimientos, 'Valor')
rend_portafolio
total_rend=pd.concat([rendimientos, rend_portafolio], axis=1)
total_rend
total_rend.std()
rendimientos.corr()
total_rend.plot(figsize=(8,6));
def plot_portafolio_rend(rend, title=None):
rend.plot(figsize=(8,6))
plt.xlabel('Año')
plt.ylabel('Rendimientos')
if (title is not None): plt.title(title)
plt.show()
plot_portafolio_rend(total_rend);
def get_historical_closes(ticker, start_date, end_date):
p = web.DataReader(ticker, "yahoo", start_date, end_date).sort_index('major_axis')
d = p.to_frame()['Adj Close'].reset_index()
d.rename(columns={'minor': 'Ticker', 'Adj Close': 'Close'}, inplace=True)
pivoted = d.pivot(index='Date', columns='Ticker')
pivoted.columns = pivoted.columns.droplevel(0)
return pivoted
closes=get_historical_closes(['AA','AAPL','MSFT','KO'], '2010-01-01', '2016-12-31')
closes
closes.plot(figsize=(8,6));
def calc_daily_returns(closes):
return np.log(closes/closes.shift(1))[1:]
daily_returns=calc_daily_returns(closes)
daily_returns.plot(figsize=(8,6));
daily_returns.corr()
def calc_annual_returns(daily_returns):
grouped = np.exp(daily_returns.groupby(lambda date: date.year).sum())-1
return grouped
annual_returns = calc_annual_returns(daily_returns)
annual_returns
def calc_portfolio_var(returns, weights=None):
if (weights is None):
weights = np.ones(returns.columns.size)/returns.columns.size
sigma = np.cov(returns.T,ddof=0)
var = (weights * sigma * weights.T).sum()
return var
calc_portfolio_var(annual_returns)
def sharpe_ratio(returns, weights = None, risk_free_rate = 0.015):
n = returns.columns.size
if weights is None: weights = np.ones(n)/n
var = calc_portfolio_var(returns, weights)
means = returns.mean()
return (means.dot(weights) - risk_free_rate)/np.sqrt(var)
sharpe_ratio(annual_returns)
def f(x): return 2+x**2
scopt.fmin(f, 10)
def negative_sharpe_ratio_n_minus_1_stock(weights,returns,risk_free_rate):
'''Given n-1 weights, return a negative sharpe ratio'''
weights2 = sp.append(weights, 1-np.sum(weights))
return -sharpe_ratio(returns, weights2, risk_free_rate)
def optimize_portfolio(returns, risk_free_rate):
w0 = np.ones(returns.columns.size-1, dtype=float) * 1.0 / returns.columns.size
w1 = scopt.fmin(negative_sharpe_ratio_n_minus_1_stock, w0, args=(returns, risk_free_rate))
final_w = sp.append(w1, 1 - np.sum(w1))
final_sharpe = sharpe_ratio(returns, final_w, risk_free_rate)
return (final_w, final_sharpe)
optimize_portfolio(annual_returns, 0.0003)
def objfun(W, R, target_ret):
stock_mean = np.mean(R,axis=0)
port_mean = np.dot(W,stock_mean)
cov=np.cov(R.T)
port_var = np.dot(np.dot(W,cov),W.T)
penalty = 2000*abs(port_mean-target_ret)
return np.sqrt(port_var) + penalty
def calc_efficient_frontier(returns):
result_means = []
result_stds = []
result_weights = []
means = returns.mean()
min_mean, max_mean = means.min(), means.max()
nstocks = returns.columns.size
for r in np.linspace(min_mean, max_mean, 150):
weights = np.ones(nstocks)/nstocks
bounds = [(0,1) for i in np.arange(nstocks)]
constraints = ({'type': 'eq', 'fun': lambda W: np.sum(W) - 1})
results = scopt.minimize(objfun, weights, (returns, r), method='SLSQP', constraints = constraints, bounds = bounds)
if not results.success: # handle error
raise Exception(results.message)
result_means.append(np.round(r,4)) # 4 decimal places
std_=np.round(np.std(np.sum(returns*results.x,axis=1)),6)
result_stds.append(std_)
result_weights.append(np.round(results.x, 5))
return {'Means': result_means, 'Stds': result_stds, 'Weights': result_weights}
frontier_data = calc_efficient_frontier(annual_returns)
def plot_efficient_frontier(ef_data):
plt.figure(figsize=(12,8))
plt.title('Efficient Frontier')
    plt.xlabel('Standard deviation of the portfolio (risk)')
plt.ylabel('Return of the portfolio')
plt.plot(ef_data['Stds'], ef_data['Means'], '--');
plot_efficient_frontier(frontier_data)
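# Hedged sketch: read the minimum-risk point off the frontier table computed above
# instead of eyeballing it on the plot.
i_min = int(np.argmin(frontier_data['Stds']))
print(frontier_data['Means'][i_min], frontier_data['Stds'][i_min], frontier_data['Weights'][i_min])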
etf=get_historical_closes(['PICK','IBB','XBI','MLPX','AMLP','VGT','RYE','IEO','LABU'], '2016-01-01', '2016-12-31')
etf.plot(figsize=(8,6));
daily_returns_etf=calc_daily_returns(etf)
daily_returns_etf
daily_returns_etf_mean=daily_returns_etf.mean()
daily_returns_etf_mean
daily_returns_etf_std=daily_returns_etf.std()
daily_returns_etf_std
daily_returns_ms=pd.concat([daily_returns_etf_mean, daily_returns_etf_std], axis=1)
daily_returns_ms
from sklearn.cluster import KMeans
random_state = 10
y_pred = KMeans(n_clusters=5, random_state=random_state).fit_predict(daily_returns_ms)
plt.scatter(daily_returns_etf_mean, daily_returns_etf_std, c=y_pred);
plt.axis([-3, 3, 0, 0.05]);
import scipy.cluster.hierarchy as hac
corr_mat=daily_returns_etf.corr(method='spearman')
corr_mat
Z = hac.linkage(corr_mat, 'single')
# Plot the dendogram
plt.figure(figsize=(25, 10))
plt.title('Hierarchical Clustering Dendrogram')
plt.xlabel('sample index')
plt.ylabel('distance')
hac.dendrogram(
Z,
leaf_rotation=90., # rotates the x axis labels
leaf_font_size=8., # font size for the x axis labels
)
plt.show()
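# Hedged follow-up sketch: cut the dendrogram above into a few flat clusters and list
# which ETF tickers fall together (hac.fcluster is standard scipy.cluster.hierarchy).
labels = hac.fcluster(Z, t=4, criterion='maxclust')
dict(zip(corr_mat.columns, labels))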
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Using Pandas to download closing-price data
Step2: Once the packages are loaded, we need to define the tickers of the stocks to use, the download source (Yahoo in this case, although Google is also possible) and the dates of interest. With that, the DataReader function from the pandas_datareader package downloads the requested prices.
Step3: Note
Step5: 4. Portfolio optimization
Step6: 5. ETF
|
13,236
|
<ASSISTANT_TASK:>
Python Code:
import keras
from os.path import join
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout,Activation, Lambda,Input
from keras.layers import Embedding
from keras.layers import Convolution1D
from keras.datasets import imdb
from keras import backend as K
from keras.layers import Convolution1D, GlobalMaxPooling1D,Convolution2D,Merge,merge,Reshape,MaxPooling2D,Flatten
from keras.utils import np_utils
from keras.models import Model
import nltk
from nltk.tag import pos_tag
import numpy as np
from keras.regularizers import l2
import theano
file_names = ['stsa.fine.test','stsa.fine.train','stsa.fine.dev']
file_path = '/home/bruce/data/sentiment/citai_process'
def read_file(fname=''):
with open(join(file_path,fname)) as fr:
lines = fr.readlines()
lines = [line.strip().lower() for line in lines]
lables = [int(line[0:1]) for line in lines]
words = [line[2:].split() for line in lines]
return words,lables
train_X,train_y = read_file(fname='stsa.fine.train')
test_X,test_y = read_file(fname='stsa.fine.test')
dev_X,dev_y = read_file(fname='stsa.fine.dev')
print(len(train_X))
print(len(test_X))
print(len(dev_X))
print(train_X[0:2])
print(train_y[0:2])
def tag_sentence(X=[]):
tag_X=[]
for line in X:
word_tag = pos_tag(line,tagset='universal')
tag = [i[1] for i in word_tag]
tag_X.append(tag)
return tag_X
train_tag_X = tag_sentence(X=train_X)
dev_tag_X = tag_sentence(X=dev_X)
test_tag_X = tag_sentence(X=test_X)
print(train_X[0])
print(train_tag_X[0])
senti_file = '/home/bruce/data/sentiment/sentiment_diction/wordwithStrength.txt'
def construct_senti_dict(senti_file=''):
with open(senti_file) as fr:
lines = fr.readlines()
lines = [line.strip().split() for line in lines]
lines = [(i[0],float(i[1])) for i in lines]
return dict(lines)
sentiment_dict=construct_senti_dict(senti_file)
print('sentiment number =',len(sentiment_dict))
def sentiment_strength(X=[],sentiment_dict=sentiment_dict):
sentiment_X = [[sentiment_dict[w] if w in sentiment_dict else 0 for w in line ]for line in X]
sentiment_X = [[ str(int(val*10)) if val <=0 else '+'+str(int(val*10)) for val in line] for line in sentiment_X]
return sentiment_X
train_sentiment_X = sentiment_strength(X=train_X,sentiment_dict=sentiment_dict)
dev_sentiment_X = sentiment_strength(X=dev_X,sentiment_dict=sentiment_dict)
test_sentiment_X = sentiment_strength(X=test_X,sentiment_dict=sentiment_dict)
assert len(train_sentiment_X) == len(train_X)
print(train_sentiment_X[0:5])
print(train_X[0:5])
print(train_y[0:5])
def token_to_index(datas=[]):
word_index={}
count=1
for data in datas:
for list_ in data:
for w in list_:
if w not in word_index:
word_index[w] = count
count = count + 1
    print('length of word_index =', len(word_index))
for i in range(len(datas)):
datas[i] = [[ word_index[w] for w in line ] for line in datas[i]]
return datas,word_index
X,word_index = token_to_index(datas=[train_X,dev_X,train_sentiment_X,train_tag_X,dev_sentiment_X,dev_tag_X])
train_X,dev_X,train_sentiment_X,train_tag_X,dev_sentiment_X,dev_tag_X = X
print('length of dict_index = ',len(word_index))
print(train_sentiment_X[0:2])
print(train_X[0:2])
print(train_y[0:2])
embedding_dim = 100
we_file = '/home/bruce/data/glove/twitter/glove.twitter.27B.{0}d.txt'.format(embedding_dim)
def get_index_wordembedding(we_file='',word_index={}):
index_wordembedding ={}
zeros = np.zeros(embedding_dim)
for line in open(we_file):
elements = line.strip().split()
if elements[0] in word_index:
index = word_index[elements[0]]
wordembedding = [float(i) for i in elements[1:]]
index_wordembedding[index] = wordembedding
    print('total number of words in the vocabulary = ', len(word_index))
    print('number of words with a pretrained embedding = ', len(index_wordembedding))
for word,index in word_index.items():
if index not in index_wordembedding:
index_wordembedding[index] = zeros
assert len(index_wordembedding) == len(word_index)
return index_wordembedding
index_wordembedding = get_index_wordembedding(we_file=we_file,word_index=word_index)
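# Hedged sketch: report GloVe coverage of the vocabulary (indices that were not found
# in the GloVe file were filled with an all-zero vector above).
n_missing = sum(1 for v in index_wordembedding.values() if not np.any(v))
print('words without a pretrained vector:', n_missing, 'of', len(index_wordembedding))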
def get_trained_embedding(index_wordembedding=None):
index_we = sorted(index_wordembedding.items())
print('index_we[0] =',index_we[0])
trained_embedding = [t[1] for t in index_we]
zeros = np.zeros(embedding_dim)
trained_embedding = np.vstack((zeros,trained_embedding))
return np.array(trained_embedding)
def batch_indexData_embedding(X=None,index_wordembedding={}):
zeros = np.zeros(embedding_dim)
return [ [ index_wordembedding[w] if w in index_wordembedding else zeros for w in line ] for line in X ]
max_len = 36
batch_size=50
max_features= 14526
#embedding_dims=50
nb_filter = 300
filter_length1 = 2
filter_length2 = 3
filter_length3 = 4
filter_size=(3,100)
dense1_hindden = 150*2
nb_classes = 5
print('Build model...')
input_random = Input(shape=(max_len,), dtype='int32', name='main_input1')
embedding = Embedding(output_dim=embedding_dim, input_dim=max_features)(input_random)
# convolution layers
conv1 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length1,
border_mode = 'valid',
activation='relu'
)(embedding)
conv2 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length1,
border_mode = 'valid',
activation='relu'
)(embedding)
conv3 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length1,
border_mode = 'valid',
activation='relu'
)(embedding)
conv1 = GlobalMaxPooling1D()(conv1)
conv2 = GlobalMaxPooling1D()(conv2)
conv3 = GlobalMaxPooling1D()(conv3)
merged_vector = merge([conv1, conv2, conv3], mode='concat')
# fully connected layer
dense_layer = Dense(dense1_hindden)
dens1 = dense_layer(merged_vector)
print('dense_layer input_shape should == (300,)')
print(dense_layer.input_shape)
dens1 = Activation('relu')(dens1)
# softmax layer
dens2 = Dense(nb_classes)(dens1)
output_random = Activation('softmax')(dens2)
model = Model(input=input_random,output=output_random)
print('finish build model')
model.compile(optimizer='adadelta',
loss='categorical_crossentropy',
metrics=['accuracy'])
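# Hedged sketch: print the layer-by-layer output shapes of the CNN-rand model just
# compiled (model.summary() is part of the standard Keras Model API).
model.summary()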
input_static = Input(shape=(max_len,embedding_dim), name='main_input2')
# convolution layers
conv1 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length1,
border_mode = 'valid',
activation='relu'
)(input_static)
conv2 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length1,
border_mode = 'valid',
activation='relu'
)(input_static)
conv3 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length1,
border_mode = 'valid',
activation='relu'
)(input_static)
conv1 =GlobalMaxPooling1D()(conv1)
conv2 =GlobalMaxPooling1D()(conv2)
conv3 =GlobalMaxPooling1D()(conv3)
merged_vector = merge([conv1,conv2,conv3], mode='concat')
# fully connected layer
dens1 = Dense(dense1_hindden)(merged_vector)
dens1 = Activation('relu')(dens1)
# softmax layer
dens2 = Dense(nb_classes)(dens1)
output_static = Activation('softmax')(dens2)
model = Model(input=input_static,output=output_static)
print('finish build model')
model.compile(optimizer='adadelta',
loss='categorical_crossentropy',
metrics=['accuracy'])
print('Build model...')
input_non_static = Input(shape=(max_len,), dtype='int32', name='main_input1')
# initialize the Embedding layer with the pretrained GloVe vectors
trained_embedding = get_trained_embedding(index_wordembedding=index_wordembedding)
embedding_layer = Embedding(max_features,
embedding_dim,
weights=[trained_embedding]
)
embedding = embedding_layer(input_non_static)
conv1 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length1,
border_mode = 'valid',
activation='relu'
)(embedding)
conv2 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length1,
border_mode = 'valid',
activation='relu'
)(embedding)
conv3 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length1,
border_mode = 'valid',
activation='relu'
)(embedding)
dropout = Dropout(0.5)
conv1 =GlobalMaxPooling1D()(conv1)
conv2 =GlobalMaxPooling1D()(conv2)
conv3 =GlobalMaxPooling1D()(conv3)
#conv1 = dropout(conv1)
#conv2 = dropout(conv2)
#conv3 = dropout(conv3)
merged_vector = merge([conv1,conv2,conv3], mode='concat')
# fully connected layer
dense_layer = Dense(dense1_hindden)
dens1 = dense_layer(merged_vector)
print('dense_layer input shpae = ',dense_layer.input_shape)
dens1 = Activation('relu')(dens1)
dens1 = dropout(dens1)
# softmax layer
dens2 = Dense(nb_classes)(dens1)
output_non_static = Activation('softmax')(dens2)
model = Model(input=input_non_static,output=output_non_static)
print('finish build model')
model.compile(optimizer='adadelta',
loss='categorical_crossentropy',
metrics=['accuracy'])
print('Build model...')
input1 = Input(shape=(max_len,), dtype='int32', name='main_input1')
input2 = Input(shape=(max_len,), name='main_input2')
#input3 = Input(shape=(max_len,), dtype='int32', name='main_input3')
embedding = Embedding(output_dim=embedding_dim, input_dim=max_features)
embedding1 = embedding(input1)
print('embedding1 output_shape = ',embedding.output_shape)
embedding2 = embedding(input2)
merged_vector = merge([embedding1,embedding2], mode='concat')
reshape = Reshape((2,max_len,embedding_dim))
word_sentiment = reshape(merged_vector)
print('reshape output_shape = ',reshape.output_shape)
conv_layer1 = Convolution2D(nb_filter, filter_size[0], filter_size[1],
activation='relu',
border_mode='valid')
conv1 = conv_layer1(word_sentiment)
print('conv_layer1 output shpae should be (100,35,1)',conv_layer1.output_shape)
maxpool = MaxPooling2D(pool_size=(34, 1))
conv1 = maxpool(conv1)
print('(100,1)==', maxpool.output_shape)
fatten = Flatten()
conv1 = fatten(conv1)
dens1 = Dense(dense1_hindden)(conv1)
dens1 = Activation('relu')(dens1)
dropout = Dropout(0.5)
dens1 = dropout(dens1)
dens2 = Dense(nb_classes)(dens1)
output = Activation('softmax')(dens2)
#model = Model(input=[input1,input2],output=output)
model = Model(input=[input1,input2],output=output)
print('finish build model')
model.compile(optimizer='adadelta',
loss='categorical_crossentropy',
metrics=['accuracy'])
print('Build model...')
input1 = Input(shape=(max_len,), dtype='int32', name='main_input1')
input2 = Input(shape=(max_len,), name='main_input2')
#input3 = Input(shape=(max_len,), dtype='int32', name='main_input3')
embedding = Embedding(output_dim=embedding_dim, input_dim=max_features)
embedding1 = embedding(input1)
embedding2 = embedding(input2)
merged_vector = merge([embedding1, embedding2], mode='concat')  # conv11/conv12 are not defined yet at this point
reshape = Reshape((2,max_len,embedding_dim))
word_sentiment = reshape(merged_vector)
print('reshape input shpae should be (72,100)= ',reshape.input_shape)
#embedding3 = embedding(input3)
#---------------------------------------------------------------------------
# convolution option 1: a different set of filters for each channel
'''
cov1_out1 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length,
border_mode = 'valid',
activation='relu'
)(embedding1)
cov1_out2 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length,
border_mode = 'valid',
activation='relu'
)(embedding2)
cov1_out3 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length,
border_mode = 'valid',
activation='relu'
)(embedding3)
'''
# convolution option 2: the same filters shared across all channels
conv11 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length1,
border_mode = 'valid',
activation='relu'
)
conv12 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length2,
border_mode = 'valid',
activation='relu'
)
conv13 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length3,
border_mode = 'valid',
activation='relu'
)
conv14 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length1,
border_mode = 'valid',
activation='relu'
)
conv15 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length2,
border_mode = 'valid',
activation='relu'
)
conv16 = Convolution1D(nb_filter = nb_filter,
filter_length = filter_length3,
border_mode = 'valid',
activation='relu'
)
dropout = Dropout(0.5)
# first channel
cov1_out11 = conv11(embedding1)
cov1_out12 = conv12(embedding1)
cov1_out13 = conv13(embedding1)
'''
cov1_out11 = dropout(cov1_out11)
cov1_out12 = dropout(cov1_out12)
cov1_out13 = dropout(cov1_out13)
'''
'''
# second channel
cov1_out14 = conv14(embedding2)
cov1_out15 = conv15(embedding2)
cov1_out16 = conv16(embedding2)
'''
# third channel:
'''
cov1_out14 = dropout(cov1_out14)
cov1_out15 = dropout(cov1_out15)
cov1_out16 = dropout(cov1_out16)
'''
#cov1_out2 = conv(embedding2)
#cov1_out3 = conv(embedding3)
#------------------------------------------------------------------------------
maxpooling = GlobalMaxPooling1D()
conv11 = maxpooling(cov1_out11)
conv12 = maxpooling(cov1_out12)
conv13 = maxpooling(cov1_out13)
#conv14 = maxpooling(cov1_out14)  # inputs are commented out above, so these are disabled too
#conv15 = maxpooling(cov1_out15)
#conv16 = maxpooling(cov1_out16)
#merged_vector = merge([conv11,conv12,conv13,conv14,conv15,conv16], mode='concat')
merged_vector = merge([conv11,conv12,conv13], mode='concat')
#dropout = Dropout(0.5)
#merged_vector = dropout(merged_vector)
dens1 = Dense(dense1_hindden)(merged_vector)
dens1 = Activation('relu')(dens1)
dens1 = dropout(dens1)
dens2 = Dense(nb_classes)(dens1)
output = Activation('softmax')(dens2)
#model = Model(input=[input1,input2],output=output)
model = Model(input=[input1],output=output)
print('finish build model')
model.compile(optimizer='adadelta',
loss='categorical_crossentropy',
metrics=['accuracy'])
from IPython.display import SVG
from keras.utils.visualize_util import model_to_dot
SVG(model_to_dot(model).create(prog='dot', format='svg'))
print(type(train_y[0]))
train_y_model = np_utils.to_categorical(train_y, nb_classes)
dev_y_model = np_utils.to_categorical(dev_y, nb_classes)
train_X_model = sequence.pad_sequences(train_X, maxlen=max_len)
dev_X_model = sequence.pad_sequences(dev_X, maxlen=max_len)
train_sentiment_X_model = sequence.pad_sequences(train_sentiment_X,maxlen=max_len)
train_tag_X_model= sequence.pad_sequences(train_tag_X,maxlen=max_len)
dev_sentiment_X_model = sequence.pad_sequences(dev_sentiment_X,maxlen=max_len)
dev_tag_X_model = sequence.pad_sequences(dev_tag_X,maxlen=max_len)
#train_embedding_X_model = batch_indexData_embedding(X=train_X_model,index_wordembedding=index_wordembedding)
dev_embedding_X_model = batch_indexData_embedding(X=dev_X_model,index_wordembedding=index_wordembedding)
dev_embedding_X_model = np.array(dev_embedding_X_model)
# convert tokens to indices
def to_index(word_index={},data=[]):
return [[word_index[w] if w in word_index else 0 for w in sentence] for sentence in data]
test_index_X = to_index(word_index,test_X)
test_sentiment_X = to_index(word_index,test_sentiment_X)
test_tag_X = to_index(word_index,test_tag_X)
# pad / truncate to max_len
test_index_X_model = sequence.pad_sequences(test_index_X, maxlen=max_len)
test_sentiment_X_model = sequence.pad_sequences(test_sentiment_X, maxlen=max_len)
test_tag_X_model = sequence.pad_sequences(test_tag_X, maxlen=max_len)
#embedding
test_embedding_X = batch_indexData_embedding(X=test_index_X,index_wordembedding=index_wordembedding)
test_y_model = np_utils.to_categorical(test_y, nb_classes)
## test
def my_generator4(X1=None,X2=None,y=None):
i = 0
max_i = int(len(X1)/batch_size)
while True:
i = i % max_i
x1_batch = X1[i*batch_size:(i+1)*batch_size]
x2_batch = X2[i*batch_size:(i+1)*batch_size]
#x3_batch = X3[i*batch_size:(i+1)*batch_size]
y_batch = y[i*batch_size:(i+1)*batch_size]
yield ([x1_batch,x2_batch],y_batch)
i = i + 1
def my_generator3(X1=None,y=None):
i = 0
max_i = int(len(X1)/batch_size)
while True:
i = i % max_i
x1_batch = X1[i*batch_size:(i+1)*batch_size]
x2_batch = batch_indexData_embedding(X=x1_batch,index_wordembedding=index_wordembedding)
x2_batch = np.array(x2_batch)
y_batch = y[i*batch_size:(i+1)*batch_size]
yield ([x1_batch,x2_batch],y_batch)
i = i + 1
def my_generator1(X1=None,y=None):
i = 0
max_i = int(len(X1)/batch_size)
while True:
i = i % max_i
x1_batch = X1[i*batch_size:(i+1)*batch_size]
y_batch = y[i*batch_size:(i+1)*batch_size]
yield (x1_batch,y_batch)
i = i + 1
def my_generator2(X1=None,y=None):
i = 0
max_i = int(len(X1)/batch_size)
while True:
i = i % max_i
x1_batch = X1[i*batch_size:(i+1)*batch_size]
x1_batch = batch_indexData_embedding(X=x1_batch,index_wordembedding=index_wordembedding)
x1_batch = np.array(x1_batch)
y_batch = y[i*batch_size:(i+1)*batch_size]
yield (x1_batch,y_batch)
i = i + 1
model.fit_generator(my_generator1(train_X_model,train_y_model),samples_per_epoch = 32*100,nb_epoch=100,verbose=1,validation_data=(dev_X_model,dev_y_model))
model.fit_generator(my_generator4(train_X_model,train_sentiment_X_model,train_y_model),samples_per_epoch = 32*100,nb_epoch=100,verbose=1,validation_data=([test_embedding_X,test_sentiment_X_model],test_y))
model.fit_generator(my_generator1(train_X_model,train_y_model),samples_per_epoch = 50*40,nb_epoch=100,verbose=1,validation_data=(test_index_X,test_y))
#model.fit_generator(my_generator1(train_X_model,train_y_model),samples_per_epoch = 50*60,nb_epoch=100,verbose=1,validation_data=([test_index_X_model],test_y))
model.fit_generator(my_generator4(train_X_model,train_sentiment_X_model,train_y_model),samples_per_epoch = 50*60,nb_epoch=100,verbose=1,validation_data=([test_index_X_model,test_sentiment_X_model],test_y_model))
model.fit_generator(my_generator4(train_X_model,train_sentiment_X_model,train_y_model),samples_per_epoch = 50*60,nb_epoch=100,verbose=1,validation_data=([test_index_X_model,test_sentiment_X_model],test_y_model))
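# Hedged follow-up sketch (assumes the two-input variant of `model` is the one currently
# in scope, matching the fit_generator call above): score it on the padded test split.
score = model.evaluate([test_index_X_model, test_sentiment_X_model], test_y_model, batch_size=batch_size)
print('test loss and accuracy:', score)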
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use POS tags as one input channel.
Step2: Use sentiment polarity as one input channel.
Step3: Build the sentiment-polarity strength channel
Step4: Negation words.
Step5: Pre-trained GloVe word vectors
Step6: Get the array of pre-trained word embeddings, used to initialize the Embedding layer
Step7: Embed a batch of index data using index_wordembedding
Step8: Build the model
Step9: Error log
Step10: CNN-static model
Step11: CNN-non-static model
Step12: CNN-multichannel model
Step13: Model diagram
Step14: Model inputs
Step15: Test data
Step16: Train the model
Step17: CNN-rand results
Step18: CNN-static results
Step19: CNN-non-static results
|
13,237
|
<ASSISTANT_TASK:>
Python Code:
def myFun(x):
return (x**x)**x
myFun(9)
from timeit import timeit
timeit(lambda: myFun(12), number=1)
%timeit 10*1000000
# this syntax allows comments ... note that if you leave off the numeric argument, %timeit seems to do nothing
myFun(12)
%timeit 10*1000000
# this syntax allows comments ... note that if you leave off the numeric argument, %timeit seems to do nothing
myFun(12)
%%timeit
# this syntax allows comments ... it defaults the looping argument
myFun(12)
%time
# generates "wall time" instead of CPU time
myFun(12)
# getting more detail using %time on a script or code
%time {for i in range(10*1000000): x=1}
%timeit -n 1 10*1000000
# does it just once which may be inaccurate due to random events
myFun(12)
def find_symmetricDiff_inputSetsAB_v1():
len_setA = int(input())
set_A = set([int(i) for i in input().split()])
len_setB = int(input())
set_B = set([int(i) for i in input().split()])
[print(val) for val in sorted(list(set_A.difference(set_B).union(set_B.difference(set_A))))]
def find_symmetricDiff_inputSetsAB_v2():
setsLst = [0,0]
for i in range(2):
int(input()) # eat value ... don't need it
setsLst[i] = set([int(i) for i in input().split()])
[print(val) for val in sorted(list(setsLst[0].difference(setsLst[1]).union(setsLst[1].difference(setsLst[0]))))]
''' understanding next two versions:
* key=int, applies int() to each value to be sorted so the values are sorted as 1,2,3 ... not: '1', '2', '3'
* a^b is the same as a.symmetric_difference(b)
these two come from discussion boards on hackerrank
'''
def find_symmetricDiff_inputSetsAB_v3():
a,b = [set(input().split()) for _ in range(4)][1::2]
return '\n'.join(sorted(a.symmetric_difference(b), key=int))
def find_symmetricDiff_inputSetsAB_v4():
a,b = [set(input().split()) for _ in range(4)][1::2]
return '\n'.join(sorted(a^b, key=int))
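# Hedged illustration of the comment above: for plain sets, `a ^ b` and
# a.symmetric_difference(b) are the same operation (example values chosen arbitrarily).
s1, s2 = {1, 2, 3, 4}, {3, 4, 5}
print(sorted(s1 ^ s2) == sorted(s1.symmetric_difference(s2)), sorted(s1 ^ s2))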
i1 = int(1000000000000000000008889934567)
i2 = int(73277773377737373000000000000007777888)
print(i1)
print(i2)
%timeit -n 1 10*1000000
find_symmetricDiff_inputSetsAB_v1()
# timeit(find_symmetricDiff_inputSetsAB_v1(), 1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For this example, timeit() needs to be the only call in the cell, and your code is passed in as a valid function call, as in this demo
Step2: Should this malfunction and/or throw errors, try restarting the kernel and re-running all prerequisite cells; this syntax should then work.
Step3: If you get the 'slowest run took ...' message, try re-running the code cell to overwrite the caching
Step4: Unlike timeit(), the other options provided here (using IPython cell magics) can time any snippet of code within a Python cell.
Step5: These tests use the following inputs. As per the requirements of the challenge problem, what each line means is also given here
|
13,238
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
class CubicSpline():
''' cubic spline of a function
- equally-spaced knots
- derivatives specified at endpoints
'''
def __init__(self, fn, xmin, xmax, n, df_left, df_right):
''' set up the spline by solving the required linear equation
'''
# function values at knots
self.x = np.linspace(xmin, xmax, n)
self.y = np.array(list(map(fn, self.x)))
self.h = self.x[1]-self.x[0]
# solve for z's
# tridiagonal matrix
A = np.diag([2.] + [4.]*(n-2) + [2.]) \
+ np.diag([1.]*(n-1), 1) + np.diag([1.]*(n-1), -1)
# rhs
b = np.zeros(n)
# interior knots
b[1:(n-1)] = (6/self.h**2)*(self.y[:(n-2)]-2*self.y[1:(n-1)]+self.y[2:])
# endpoints
b[0] = (6/self.h**2)*(self.y[1]-self.y[0]) - (6/self.h)*df_left
b[n-1] = -(6/self.h**2)*(self.y[n-1]-self.y[n-2]) + (6/self.h)*df_right
# solve and store z-values
self.z = np.linalg.solve(A, b)
def __call__(self, x):
''' approximate the function at given location
'''
# linear search to find the interval x belongs to
# (simple but not the most efficient)
if x<self.x[0] or x>self.x[-1]:
raise ValueError("x out of range: {}".format(x))
for n, _x in enumerate(self.x):
if x<=_x:
break
# polynomial values
a = (_x - x)/self.h
b = 1-a
c = (self.h**2/6)*a*(a**2 - 1)
d = (self.h**2/6)*b*(b**2 - 1)
# interpolation results
return a*self.y[n-1] + b*self.y[n] + c*self.z[n-1] + d*self.z[n]
sin_spline = CubicSpline(np.sin, 0.0, 2.*np.pi, 11, 1, -1)
sin_spline(1.0)
np.sin(1.0)
xmin = 1.0
xmax = np.exp(1.0)
n = 11
log_spline = CubicSpline(np.log, xmin, xmax, n, 1, np.exp(-1))
xtest = np.linspace(xmin, xmax, 500)
yexact = np.array(list(map(np.log, xtest)))
yapprox = np.array(list(map(log_spline, xtest)))
plt.plot(xtest, yapprox-yexact)
plt.title("Approximation of log(x) by a cubic spline: {} points".format(n))
plt.xlabel('x')
plt.ylabel("approximation - exact")
plt.show()
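# Hedged convergence check (not in the original notebook): the maximum interpolation
# error should fall quickly as the number of equally-spaced knots grows.
for n_knots in (6, 11, 21):
    s = CubicSpline(np.log, xmin, xmax, n_knots, 1, np.exp(-1))
    print(n_knots, max(abs(s(x) - np.log(x)) for x in xtest))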
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Try it out
Step2: Try it out over a range
|
13,239
|
<ASSISTANT_TASK:>
Python Code:
class TestIterator:
def __init__(self, max_value):
self._current_value = 0
self._max_value = max_value
def __next__(self):
self._current_value += 1
if self._current_value > self._max_value:
raise StopIteration()
return self._current_value
iterator = TestIterator(3)
try:
while True:
print(next(iterator))
except StopIteration:
pass
for i in TestIterator(3):
print(i)
list(TestIterator(3))
class TestIterable:
def __init__(self, max_value):
self._max_value = max_value
def __iter__(self):
return TestIterator(self._max_value)
for i in TestIterable(3):
print(i)
for i in [1, 2, 3]:
print(i)
test_iterable = TestIterable(3)
test_iterator = iter(test_iterable)
print(test_iterable)
class RealTestIterator(TestIterator):
def __iter__(self):
return self
for i in RealTestIterator(3):
print(i)
iterator = RealTestIterator(3)
for i in iterator:
print(i)
for i in iterator:
# iterator directly raises StopIteration, so this is never reached
print(i)
def is_iterator(it):
return iter(it) is it
print(is_iterator(RealTestIterator(3)))
print(is_iterator(TestIterable(3)))
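# Hedged alternative to the semantic check above: the abstract base classes in
# collections.abc only check structure (do __iter__/__next__ exist?), so they agree here
# but could not catch an iterator whose __iter__ returns a different object.
import collections.abc as cabc
print(isinstance(RealTestIterator(3), cabc.Iterator), isinstance(TestIterable(3), cabc.Iterator))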
def test():
yield 1
yield 2
print(test)
print(test())
t = test()
try:
while True:
print(next(t))
except StopIteration:
print('done')
for i in test():
print(i)
def test():
x = yield 1
yield x**2
t = test()
print(next(t)) # go to the first yield
print(t.send(3))
def test():
yield 1
yield 2
def wrapper():
for i in test():
yield i
for i in wrapper():
print(i)
def test():
x = yield 1
yield x**2
def wrapper():
yield from test()
w = wrapper()
print(next(w))
print(w.send(3))
def test():
for i in range(3):
yield i
return 'done'
for i in test():
print(i)
t = test()
try:
while True:
print(next(t))
except StopIteration as e:
print(e.value)
def wrapper():
value = yield from test()
print('wrapper got:', value)
return 'wrapper done'
for i in wrapper():
print(i)
[xy for xy in range(3)]
xy
(xy for xy in range(3))
xy
[i for i in range(3) if (yield i)]
set([i**2 for i in range(3) if (yield i)])
set([(yield i**2) for i in range(3)])
def g():
return [i for i in (yield range(3))]
next(g())
set(i**2 for i in range(3) if (yield i))
{i**2 for i in range(3) if (yield i)}
{i: i**2 for i in range(3) if (yield i)}
[i for i in range(3) if (yield from i)]
set([i for i in range(3) if (yield from i)])
import unittest.mock as mock
m = mock.Mock(side_effect=[1, 2])
def test():
yield m()
yield m()
yield m()
for i in test():
print(i)
def test():
try:
i = 1
while True:
yield i
i += 1
except GeneratorExit:
print('done')
print('bye')
t = test()
print(next(t))
print(next(t))
t.close()
try:
print(next(t))
except StopIteration:
print('no more values')
def test():
i = 1
while True:
yield i
i += 1
t = test()
print(next(t))
print(next(t))
t.close()
try:
print(next(t))
except StopIteration:
print('no more values')
def test():
try:
i = 1
while True:
yield i
i += 1
except GeneratorExit:
print('done')
yield 'just one more value'
t = test()
print(next(t))
print(next(t))
t.close()
def test():
try:
i = 1
while True:
yield i
i += 1
except GeneratorExit:
print('done')
yield 'one more value'
yield 'and another one'
t = test()
print(next(t))
print(next(t))
print(t.throw(GeneratorExit()))
print(next(t))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: When you perform the iteration manually you should use the builtin next function to call the magic __next__ method.
Step2: Of course you can also use a standard for-loop. However, the for-loop actually expects to be given a so called iterable object, not an iterator.
Step3: The same is the case for list constructors.
Step4: Iterables
Step5: Now we can finally use the standard for-loop
Step6: This is convenient, because all the standard container classes are iterable. So you can directly put them into a for-loop or list constructor, without first having to manually create an iterator first.
Step7: Usually one therefore does not have to use the __iter__ method manually. But if you do, use the builtin iter function instead.
Step8: It would be annoying (and quite surprising) to not be able to use iterators with for-loops. Therefore iterators in Python must include an __iter__ method as well, returning the iterator itself.
Step9: We can now use this iterator as expected. When the for-loop applies the iter function this works and has no effect on the iterator.
Step10: But there is an important semantic difference between the __iter__ of iterables and iterators
Step11: This can cause subtle bugs and is actually a nice example for the pitfalls of duck typing. One possible way to safeguard against this is by testing the semantics of __iter__
Step12: Generators
Step13: The iteration can be performed using the standard iterator API.
Step14: A generator object can be used anywhere an iterator is supported, e.g., for loops.
Step15: Generators as Coroutines
Step16: Note that next(t) is equivalent to t.send(None).
Step17: Doing the same with a coroutine on the other hand is quite hard (see PEP 380), so Python 3.3 introduced yield from.
Step18: The same PEP also introduced return statements in coroutines, to transport a return value via StopIteration.
Step19: The return value also becomes the value of yield from
Step20: So yield from transparently pipes through the iterations and provides the end result value.
Step21: List comprehensions now have their own execution context, just like functions and generator expressions.
Step22: A side effect of this is that a yield statement in some parts of a list comprehension causes it to evaluate to a generator object.
Step23: This can be surprising at first.
Step24: Only the expression list part is not affected by this. A yield statement in this part of the list comprehension works as normally expected (i.e., it refers to the surrounding generator function).
Step25: Generator expressions have always behaved like described above (since they are executed lazily they always had to store their context).
Step26: Set and Dict comprehensions of course act like just list comprehensions.
Step27: With yield from we get the same behavior as with yield.
Step28: Beware of StopIteration
Step29: So a simple error in setting up your mocks can silently cause an unexpected abort in your asynchronous test code!
Step30: Catching the GeneratorExit is not really necessary here. But if the generator has any resources that need cleanup then one can use a try ... finally or a context manager to perform this.
Step31: Yielding values after the exception was raised is not supported.
Step32: Note that throwing the GeneratorExit exception manually does not have the same effect as calling close.
|
13,240
|
<ASSISTANT_TASK:>
Python Code:
# Imports for pandas, and numpy
import numpy as np
import pandas as pd
# imports for seaborn to and matplotlib to allow graphing
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="whitegrid")
%matplotlib inline
# import Titanic CSV - NOTE: adjust file path as neccessary
dTitTrain_DF = pd.read_csv('train.csv')
# Clearing of Columns not neccesary for statistical analysis
dTitTrain_DF = dTitTrain_DF.drop(["Name", "Ticket"], axis=1)
dTitTrain_DF.info()
dTitTrain_DF.describe()
titAge = dTitTrain_DF.dropna(subset=['Age'])
# Distribution gender (adult and male)
ACmenData = dTitTrain_DF[dTitTrain_DF.Sex == 'male']
ACwomenData = dTitTrain_DF[dTitTrain_DF.Sex == 'female']
ACmenDataCount = float(ACmenData['Sex'].count())
ACwomenDataCount = float(ACwomenData['Sex'].count())
# Gender Specific DFs
AmenData = dTitTrain_DF[dTitTrain_DF.Sex == 'male'][dTitTrain_DF.Age >= 21]
AwomenData = dTitTrain_DF[dTitTrain_DF.Sex == 'female'][dTitTrain_DF.Age >= 21]
AmenDataCount = float(AmenData['Sex'].count())
AwomenDataCount = float(AwomenData['Sex'].count())
# print(menDataCount)
# print(womenDataCount)
# Age Specific Groups
adultData = titAge[titAge.Age >= 21]
childData = titAge[titAge.Age < 21]
adultDataCount = float(adultData['Age'].count())
childDataCount = float(childData['Age'].count())
#print(childDataCount)
#print(adultDataCount)
# Pclass
titClass1 = dTitTrain_DF[dTitTrain_DF.Pclass == 1]
titClass2 = dTitTrain_DF[dTitTrain_DF.Pclass == 2]
titClass3 = dTitTrain_DF[dTitTrain_DF.Pclass == 3]
# Alone or Family
dTitTrain_DF['SoloOrFamily'] = dTitTrain_DF.SibSp + dTitTrain_DF.Parch
dTitTrain_DF['SoloOrFamily'].loc[dTitTrain_DF['SoloOrFamily'] > 0] = 'Family'
dTitTrain_DF['SoloOrFamily'].loc[dTitTrain_DF['SoloOrFamily'] == 0] = 'Alone'
# Survivor Column (Yes or no)
dTitTrain_DF['Survivor']= dTitTrain_DF.Survived.map({0:'No', 1:'Yes'})
titCabin = dTitTrain_DF.dropna(subset=['Cabin'])
# Locational Data Groups
titDecks = titCabin['Cabin']
def deckGrab(tDK, cabLetter):
deckLevels = []
for level in tDK:
deckLevels.append(level[0])
TDF = pd.DataFrame(deckLevels)
TDF.columns = ['Cabin']
TDF = TDF[TDF.Cabin == cabLetter]
return TDF
def deckCount(tDK, cabLetter):
TDF = deckGrab(tDK, cabLetter)
return TDF[TDF.Cabin == cabLetter].count()['Cabin']
# print(deckCount(titDecks, "A"))
# print(deckCount(titDecks, "B"))
# print(deckCount(titDecks, "C"))
# print(deckCount(titDecks, "D"))
# print(deckCount(titDecks, "E"))
# print(deckCount(titDecks, "F"))
# print(deckCount(titDecks, "G"))
# embarked
titCherbourg = dTitTrain_DF[dTitTrain_DF.Embarked == 'C']
titQueenstown = dTitTrain_DF[dTitTrain_DF.Embarked == 'Q']
titSouthampton = dTitTrain_DF[dTitTrain_DF.Embarked == 'S']
printG = "Men account for " + str(ACmenDataCount) + " and " + "Women account for " + str(ACwomenDataCount) + " (Total Passengers: " + str(dTitTrain_DF.count()['Age']) + ")"
print(printG)
gSSC = sns.factorplot('Sex', data=dTitTrain_DF, kind='count')
gSSC.despine(left=True)
gSSC.set_ylabels("count of passengers")
gGCSC= sns.factorplot('Pclass',order=[1,2,3], data=dTitTrain_DF, hue='Sex', kind='count')
gGCSC.despine(left=True)
gGCSC.set_ylabels("count of passengers")
printA = "Youngest Passenger in the passenger list was " + str(titAge['Age'].min()) + " years of age." \
+ "\n" + "Oldest Passenger in the passenger list was " + str(titAge['Age'].max()) + " years of age." \
+ "\n" + "Mean of Passengers ages in the passenger list is " + str(titAge['Age'].mean()) + " years of age."
print(printA)
titAge['Age'].hist(bins=80)
gCPS = sns.FacetGrid(titAge,hue='Pclass', aspect=4, hue_order=[1,2,3])
gCPS.map(sns.kdeplot,'Age', shade=True)
gCPS.set(xlim=(0,titAge['Age'].max()))
gCPS.add_legend()
# splits passengers into 3 categories (male of female if considered adult, and child if below 21 of age)
def minorOrAdult(passenger):
age, sex = passenger
if age < 21:
return 'child'
else:
return sex
# adds new column to dataframe that distinguishes a passenger as a child or an adult
dTitTrain_DF['PersonStatus'] = dTitTrain_DF[['Age', 'Sex']].apply(minorOrAdult, axis=1)
dTitTrain_DF['PersonStatus'].value_counts()
gACPS = sns.FacetGrid(dTitTrain_DF, hue='PersonStatus', aspect=4, hue_order=['child', 'male', 'female'])
gACPS.map(sns.kdeplot,'Age', shade=True)
gACPS.set(xlim=(0,titAge['Age'].max()))
gACPS.add_legend()
gGAC= sns.factorplot('Pclass', order=[1,2,3], data=dTitTrain_DF, hue='PersonStatus', kind='count',hue_order=['child','male','female'])
gGAC.despine(left=True)
gGAC.set_ylabels("count of passengers")
sns.factorplot('SoloOrFamily', data=dTitTrain_DF, kind='count')
print("Alone: " + str(dTitTrain_DF[dTitTrain_DF.SoloOrFamily == "Alone"].count()['SoloOrFamily']))
print("Family: " + str(dTitTrain_DF[dTitTrain_DF.SoloOrFamily == "Family"].count()['SoloOrFamily']))
def prepareDeckGraph(titDecksDF):
deckLevels = []
for level in titDecksDF:
deckLevels.append(level[0])
T_DF = pd.DataFrame(deckLevels)
T_DF.columns = ['Cabin']
T_DF = T_DF[T_DF.Cabin != 'T']
return T_DF
gTD_DF = prepareDeckGraph(titDecks)
sns.factorplot('Cabin', order=['A','B','C','D','E','F','G'], data=gTD_DF, kind='count')
print("A: " + str(deckCount(titDecks, "A")))
print("B: " + str(deckCount(titDecks, "B")))
print("C: " + str(deckCount(titDecks, "C")))
print("D: " + str(deckCount(titDecks, "D")))
print("E: " + str(deckCount(titDecks, "E")))
print("F: " + str(deckCount(titDecks, "F")))
print("G: " + str(deckCount(titDecks, "G")))
sns.factorplot('Embarked', order=['C','Q','S'], data=dTitTrain_DF, hue='Pclass', kind='count', hue_order=[1,2,3])
# titCherbourg
# titQueenstown
# titSouthampton
print("Total:")
print("Cherbourg: " + str(titCherbourg.count()['Embarked']))
print("Queenstown: " + str(titQueenstown.count()['Embarked']))
print("Southampton: " + str(titSouthampton.count()['Embarked']))
print("")
print("Cherbourg: ")
print("Pclass 1 - " + str(titCherbourg[titCherbourg.Pclass == 1].count()['Embarked']))
print("Pclass 2 - " + str(titCherbourg[titCherbourg.Pclass == 2].count()['Embarked']))
print("Pclass 3 - " + str(titCherbourg[titCherbourg.Pclass == 3].count()['Embarked']))
print("")
print("Queenstown: ")
print("Pclass 1 - " + str(titQueenstown[titQueenstown.Pclass == 1].count()['Embarked']))
print("Pclass 2 - " + str(titQueenstown[titQueenstown.Pclass == 2].count()['Embarked']))
print("Pclass 3 - " + str(titQueenstown[titQueenstown.Pclass == 3].count()['Embarked']))
print("")
print("Southampton: ")
print("Pclass 1 - " + str(titSouthampton[titSouthampton.Pclass == 1].count()['Embarked']))
print("Pclass 2 - " + str(titSouthampton[titSouthampton.Pclass == 2].count()['Embarked']))
print("Pclass 3 - " + str(titSouthampton[titSouthampton.Pclass == 3].count()['Embarked']))
# Survivors Overall
gSOA = sns.factorplot('Survivor', data=dTitTrain_DF, kind='count')
gSOA.despine(left=True)
gSOA.set_ylabels("count of passengers")
print("Survivor: " + str(dTitTrain_DF[dTitTrain_DF.Survivor == "Yes"].count()['Survivor']))
print("Non-Survivor: " + str(dTitTrain_DF[dTitTrain_DF.Survivor == "No"].count()['Survivor']))
# Series probability - access probability of survived in men and women
menProb = ACmenData.groupby('Sex').Survived.mean()
womenProb = ACwomenData.groupby('Sex').Survived.mean()
menPercent = menProb[0]*100
womenPercent = womenProb[0]*100
print("Men Survivalbility: ")
print(menProb[0])
print("Women Survivalbility: ")
print(womenProb[0])
gSSP = sns.factorplot("Sex", "Survived", data=dTitTrain_DF, kind="bar", size=5)
gSSP.despine(left=True)
gSSP.set_ylabels("survival probability")
# Determines the probability of survival for a given Pclass
def define_pClassProb(dataFrameIN, numClass):
classEntries = dataFrameIN[dataFrameIN.Pclass == numClass]
sClassEntries = classEntries[classEntries.Survived == 1]
cClassEntries = (classEntries.count(numeric_only=True)['Pclass']).astype(float)
cSClassEntries = (sClassEntries.count(numeric_only=True)['Pclass']).astype(float)
return (cSClassEntries/cClassEntries)
print("Class 1 Survivality: ")
print(define_pClassProb(dTitTrain_DF, 1))
print("Class 2 Survivality: ")
print(define_pClassProb(dTitTrain_DF, 2))
print("Class 3 Survivality: ")
print(define_pClassProb(dTitTrain_DF, 3))
gCS = sns.factorplot("Pclass", "Survived",order=[1,2,3],data=dTitTrain_DF, kind="bar", size=5)
gCS.despine(left=True)
gCS.set_ylabels("survival probability")
print("Class 1 Survivality: ")
print(define_pClassProb(dTitTrain_DF, 1))
print("Class 2 Survivality: ")
print(define_pClassProb(dTitTrain_DF, 2))
print("Class 3 Survivality: ")
print(define_pClassProb(dTitTrain_DF, 3))
sns.factorplot("Pclass", "Survived",order=[1,2,3], data=dTitTrain_DF, kind='point')
# determines the probability of survival for genders in a given Pclass
def define_pClassProbSex(dataFrameIN, numClass, sex):
classEntries = dataFrameIN[dataFrameIN.Pclass == numClass][dataFrameIN.Sex == sex]
sClassEntries = classEntries[classEntries.Survived == 1]
cClassEntries = (classEntries.count(numeric_only=True)['Pclass']).astype(float)
cSClassEntries = (sClassEntries.count(numeric_only=True)['Pclass']).astype(float)
return (cSClassEntries/cClassEntries)
print("Class 1 Survivality(MALE): ")
print(define_pClassProbSex(dTitTrain_DF, 1, 'male'))
print("Class 1 Survivality(FEMALE): ")
print(define_pClassProbSex(dTitTrain_DF, 1, 'female'))
print("Class 2 Survivality(MALE): ")
print(define_pClassProbSex(dTitTrain_DF, 2, 'male'))
print("Class 2 Survivality(FEMALE): ")
print(define_pClassProbSex(dTitTrain_DF, 2, 'female'))
print("Class 3 Survivality(MALE): ")
print(define_pClassProbSex(dTitTrain_DF, 3, 'male'))
print("Class 3 Survivality(FEMALE): ")
print(define_pClassProbSex(dTitTrain_DF, 3, 'female'))
gGCSP = sns.factorplot("Pclass", "Survived",order=[1,2,3],data=dTitTrain_DF,hue='Sex', kind='bar')
gGCSP.despine(left=True)
gGCSP.set_ylabels("survival probability")
sns.factorplot("Pclass", "Survived", hue='Sex',order=[1,2,3], data=dTitTrain_DF, kind='point')
#Determine probability of survival of children in a given Pclass
def define_pClassChildProb(dataFrameIN, numClass):
ChildDF = dataFrameIN[dataFrameIN.Pclass == numClass][dataFrameIN.PersonStatus == 'child']
ChildSurvived = dataFrameIN[dataFrameIN.Pclass == numClass][dataFrameIN.PersonStatus == 'child'][dataFrameIN.Survivor == 'Yes']
totalCChild = ChildDF.count()['PassengerId'].astype(float)
CChildSurvived = ChildSurvived.count()['PassengerId'].astype(float)
return CChildSurvived/totalCChild
def define_pClassAdultProb(dataFrameIN, numClass, sex):
AdultDF = dataFrameIN[dataFrameIN.Pclass == numClass][dataFrameIN.PersonStatus == sex]
AdultSurvived = dataFrameIN[dataFrameIN.Pclass == numClass][dataFrameIN.PersonStatus == sex][dataFrameIN.Survivor == 'Yes']
totalCAdult = AdultDF.count()['PassengerId'].astype(float)
CAdultSurvived = AdultSurvived.count()['PassengerId'].astype(float)
return CAdultSurvived/totalCAdult
print("PClass 1 Survival Child: ")
print(define_pClassChildProb(dTitTrain_DF, 1))
print("PClass 1 Survival Female: ")
print(define_pClassAdultProb(dTitTrain_DF, 1, 'female'))
print("PClass 1 Survival Male: ")
print(define_pClassAdultProb(dTitTrain_DF, 1, 'male'))
print("-----------")
print("PClass 2 Survival Child: ")
print(define_pClassChildProb(dTitTrain_DF, 2))
print("PClass 2 Survival Female: ")
print(define_pClassAdultProb(dTitTrain_DF, 2, 'female'))
print("PClass 2 Survival Male: ")
print(define_pClassAdultProb(dTitTrain_DF, 2, 'male'))
print("-----------")
print("PClass 3 Survival Child: ")
print(define_pClassChildProb(dTitTrain_DF, 3))
print("PClass 3 Survival Female: ")
print(define_pClassAdultProb(dTitTrain_DF, 3, 'female'))
print("PClass 3 Survival Male: ")
print(define_pClassAdultProb(dTitTrain_DF, 3, 'male'))
sns.factorplot("Pclass", "Survived", hue='PersonStatus',order=[1,2,3], data=dTitTrain_DF, kind='point')
#sns.lmplot('Age', 'Survived', data=dTitTrain_DF)
pSBA = sns.boxplot(data=dTitTrain_DF, x='Survived', y='Age')
pSBA.set(title='Age Distribution by Survival',
xlabel = 'Survival',
ylabel = 'Age Distrobution',
xticklabels = ['Died', 'Survived'])
# Using the SoloOrFamily column created earlier in the passenger distributions section,
# create separate dataframes for passengers traveling alone and with family
familyPass = dTitTrain_DF[dTitTrain_DF['SoloOrFamily'] == "Family"]
alonePass = dTitTrain_DF[dTitTrain_DF['SoloOrFamily'] == "Alone"]
# Creates a list of surviving family and alone passengers
AFamilyPass = familyPass[familyPass.Survivor == "Yes"]
AAlonePass = alonePass[alonePass.Survivor == "Yes"]
# Determines the probability of survival for passengers that traveled alone and with family
pAF = float(AFamilyPass['SoloOrFamily'].count()) / float(familyPass['SoloOrFamily'].count())
pAA = float(AAlonePass['SoloOrFamily'].count()) / float(alonePass['SoloOrFamily'].count())
print("Probability of Survival being with Family: ")
print(pAF)
print("")
print("Probability of Survival being alone: ")
print(pAA)
gSSP = sns.factorplot("SoloOrFamily", "Survived", data=dTitTrain_DF, kind="bar", size=5)
gSSP.despine(left=True)
gSSP.set_ylabels("survival probability")
#sns.lmplot('Age', 'Survived',hue='Pclass', data=dTitanic_DF, hue_order=[1,2,3])
pACSB = sns.boxplot(data = dTitTrain_DF.dropna(subset = ['Age']).sort_values('Pclass'), x='Pclass', y='Age', hue='Survivor')
pACSB.set(title='Age by Class and Survival - Box Plot', xlabel='Pclass')
pACSB.legend(bbox_to_anchor=(1.05, .7), loc=2, title = 'Survived',borderaxespad=0.)
#sns.lmplot('Age', 'Survived', hue='Sex' ,data=dTitanic_DF)
pAGSB = sns.boxplot(data=dTitTrain_DF.dropna(subset = ['Age']), x= 'Sex', y= 'Age', hue='Survivor')
pAGSB.set(title='Age by Gender and Survival - Box Plot')
pAGSB.legend(bbox_to_anchor=(1.05, .7), loc=2, title = 'Survived',borderaxespad=0.)
# Determining better odds which will be compared to test group (First comparison - Pclass and age group)
import csv
# # Manual - Age Group and gender adult with highest above 49%
# print(define_pClassChildProb(dTitTrain_DF, 1))
# print(define_pClassAdultProb(dTitTrain_DF, 1, 'female'))
# print(define_pClassChildProb(dTitTrain_DF, 2))
# print(define_pClassAdultProb(dTitTrain_DF, 2, 'female'))
# print(define_pClassAdultProb(dTitTrain_DF, 3, 'female'))
# #sibsp and parch
test_file = open('test.csv', 'r')
test_file_object = csv.reader(test_file)
header = next(test_file_object)
prediction_file = open("genderPclassbasedmodel.csv", "wb")
prediction_file_object = csv.writer(prediction_file)
prediction_file_object.writerow(["PassengerId", "Survived"])
for row in test_file_object: # For each row in test.csv
weight = 0.0
    if int(row[1]) == 1:  # Pclass is read from the CSV as a string
weight = weight + 9
    elif int(row[1]) == 2:
weight = weight + 8
else:
weight = 5
if row[3] == 'female':
weight = weight + 8
else:
weight = weight + 2
    if row[4] != '' and float(row[4]) < 21:  # Age may be missing in test.csv
# child
weight = weight + 6
else:
# adult
weight = weight + 5
    aFam = int(row[5]) + int(row[6])
if aFam > 0:
weight = weight + 5
else:
weight = weight + 3
weightScore = weight/40.0
print(str(weightScore))
if(weightScore >= .5):
prediction_file_object.writerow([row[0],'1'])
else:
prediction_file_object.writerow([row[0],'0'])
#prediction_file_object.writerow([row[0],'1'])
#prediction_file_object.writerow([row[0],'0'])
test_file.close()
prediction_file.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Distribution of Passengers
Step2: Distribution of Genders in pClass populations
Step3: Age - Analysis | Graph
Step4: Distrbution of Age in passenger population
Step5: Distribution of Age in pClass population
Step6: Distribution of passengers into adult and children age groups (Child = less than 21 years of age)
Step7: Distribution of child and adult (male and female) age groups by age
Step8: Distribution of child and adult (male and female) by pClass
Step9: Alone or Family
Step10: Locational
Step11: Locational
Step12: Surivival Graph Comparison
Step13: Survival by Gender
Step14: Survival by Pclass
Step15: Survival Pclass and Gender
Step16: Survival By Pclass and Age Group (Adult (Male / Female) / Child)
Step17: Survival by Age Distribution
Step18: Survival by Alone or with Family
Step19: Survival pClass by Age Distribution
Step20: Survival Gender by Age Distribution
Step21: Process CSV - Generation of Estimation Survival Table
|
13,241
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
print("NumPy version:",np.__version__)
print("Pandas version:",pd.__version__)
%ls
steel_df = pd.read_excel("steel1045.xls")
al_df = pd.read_excel("aluminum6061.xls")
steel_df.head()
al_df.head()
strain_steel = steel_df['CH5']*0.01
d_steel = 0.506 # test bar diameter = 0.506 inches
stress_steel = (steel_df['FORCE']*0.001)/(np.pi*((d_steel/2)**2))
strain_al = al_df['CH5']*0.01
d_al = 0.506 # test bar diameter = 0.506 inches
stress_al = (al_df['FORCE']*0.001)/(np.pi*((d_al/2)**2))
plt.plot(strain_steel,stress_steel,strain_al,stress_al)
plt.show()
plt.plot(strain_steel,stress_steel,strain_al,stress_al)
plt.xlabel('strain (in/in)')
plt.ylabel('stress (ksi)')
plt.title('Stress Strain Curve of Steel 1045 and Aluminum 6061 in tension')
plt.legend(['Steel 1045','Aluminum 6061'])
plt.show()
plt.plot(strain_steel,stress_steel,strain_al,stress_al)
plt.xlabel('strain (in/in)')
plt.ylabel('stress (ksi)')
plt.title('Stress Strain Curve of Steel 1045 and Aluminum 6061 in tension')
plt.legend(['Steel 1045','Aluminum 6061'])
plt.savefig('stress-strain_curve.png', dpi=300, bbox_inches='tight')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Ensure the two .xls data files are in the same folder as the Jupyter notebook
Step2: We can see our Jupyter notebook stress_strain_curve_with_python.ipynb as well as the two .xls data files aluminum6061.xls and steel1045.xls are in our current folder.
Step3: We can use Pandas .head() method to view the first five rows of each dataframe.
Step4: We see a number of columns in each dataframe. The columns we are interested in are FORCE, EXT, and CH5. Below is a description of what these columns mean.
Step5: Build a quick plot
Step6: We see a plot with two lines. One line represents the steel sample and one line represents the aluminum sample. We can improve our plot by adding axis labels with units, a title and a legend.
Step7: The plot we see has two lines, axis labels, a title and a legend. Next we'll save the plot to a .png image file.
|
13,242
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
import matplotlib.image as mpimg
from IPython.display import Image
from astropy.io import fits
import aplpy
#Disable astropy/aplpy logging
import logging
logger0 = logging.getLogger('astropy')
logger0.setLevel(logging.CRITICAL)
logger1 = logging.getLogger('aplpy')
logger1.setLevel(logging.CRITICAL)
from IPython.display import HTML
HTML('../style/code_toggle.html')
def generalGauss2d(x0, y0, sigmax, sigmay, amp=1., theta=0.):
    '''Return a normalized general 2-D Gaussian function
    x0,y0: centre position
    sigmax, sigmay: standard deviation
    amp: amplitude
    theta: rotation angle (deg)
    '''
#norm = amp * (1./(2.*np.pi*(sigmax*sigmay))) #normalization factor
norm = amp
    rtheta = theta * np.pi / 180. #convert to radians
#general function parameters (https://en.wikipedia.org/wiki/Gaussian_function)
a = (np.cos(rtheta)**2.)/(2.*(sigmax**2.)) + (np.sin(rtheta)**2.)/(2.*(sigmay**2.))
b = -1.*(np.sin(2.*rtheta))/(4.*(sigmax**2.)) + (np.sin(2.*rtheta))/(4.*(sigmay**2.))
c = (np.sin(rtheta)**2.)/(2.*(sigmax**2.)) + (np.cos(rtheta)**2.)/(2.*(sigmay**2.))
return lambda x,y: norm * np.exp(-1. * (a * ((x - x0)**2.) - 2.*b*(x-x0)*(y-y0) + c * ((y-y0)**2.)))
def genRstoredBeamImg(fitsImg):
Generate an image of the restored PSF beam based on the FITS header and image size
fh = fits.open(fitsImg)
#get the restoring beam information from the FITS header
bmin = fh[0].header['BMIN'] #restored beam minor axis (deg)
bmaj = fh[0].header['BMAJ'] #restored beam major axis (deg)
bpa = fh[0].header['BPA'] #restored beam angle (deg)
dRA = fh[0].header['CDELT1'] #pixel size in RA direction (deg)
ra0 = fh[0].header['CRPIX1'] #centre RA pixel
dDec = fh[0].header['CDELT2'] #pixel size in Dec direction (deg)
dec0 = fh[0].header['CRPIX2'] #centre Dec pixel
#construct 2-D ellipitcal Gaussian function
gFunc = generalGauss2d(0., 0., bmin/2., bmaj/2., theta=bpa)
#produce an restored PSF beam image
imgSize = 2.*(ra0-1) #assumes a square image
xpos, ypos = np.mgrid[0:imgSize, 0:imgSize].astype(float) #make a grid of pixel indicies
xpos -= ra0 #recentre
ypos -= dec0 #recentre
xpos *= dRA #convert pixel number to degrees
ypos *= dDec #convert pixel number to degrees
return gFunc(xpos, ypos) #restored PSF beam image
def convolveBeamSky(beamImg, skyModel):
    '''Convolve a beam (PSF or restored) image with a sky model image; images must be the same shape'''
sampFunc = np.fft.fft2(beamImg) #sampling function
skyModelVis = np.fft.fft2(skyModel[0,0]) #sky model visibilities
sampModelVis = sampFunc * skyModelVis #sampled sky model visibilities
return np.abs(np.fft.fftshift(np.fft.ifft2(sampModelVis))) #sky model convolved with restored beam
fig = plt.figure(figsize=(16, 7))
fh = fits.open('../data/fits/deconv/KAT-7_6h60s_dec-30_10MHz_10chans_uniform_n1000-residual.fits')
residualImg = fh[0].data
fh = fits.open('../data/fits/deconv/KAT-7_6h60s_dec-30_10MHz_10chans_uniform_n1000-model.fits')
skyModel = fh[0].data
#generate a retored PSF beam image
restBeam = genRstoredBeamImg(
'../data/fits/deconv/KAT-7_6h60s_dec-30_10MHz_10chans_uniform_n1000-residual.fits')
#convolve restored beam image with skymodel
convImg = convolveBeamSky(restBeam, skyModel)
gc1 = aplpy.FITSFigure(residualImg, figure=fig, subplot=[0.1,0.1,0.35,0.8])
gc1.show_colorscale(vmin=-1.5, vmax=2, cmap='viridis')
gc1.hide_axis_labels()
gc1.hide_tick_labels()
plt.title('Residual Image (niter=1000)')
gc1.add_colorbar()
gc2 = aplpy.FITSFigure(convImg, figure=fig, subplot=[0.5,0.1,0.35,0.8])
gc2.show_colorscale(vmin=0., vmax=2.5, cmap='viridis')
gc2.hide_axis_labels()
gc2.hide_tick_labels()
plt.title('Sky Model')
gc2.add_colorbar()
fig.canvas.draw()
fig = plt.figure(figsize=(16, 7))
gc1 = aplpy.FITSFigure('../data/fits/deconv/KAT-7_6h60s_dec-30_10MHz_10chans_uniform_n100-dirty.fits', \
figure=fig, subplot=[0.1,0.1,0.35,0.8])
gc1.show_colorscale(vmin=-1.5, vmax=3., cmap='viridis')
gc1.hide_axis_labels()
gc1.hide_tick_labels()
plt.title('Dirty Image')
gc1.add_colorbar()
gc2 = aplpy.FITSFigure('../data/fits/deconv/KAT-7_6h60s_dec-30_10MHz_10chans_uniform_n100-image.fits', \
figure=fig, subplot=[0.5,0.1,0.35,0.8])
gc2.show_colorscale(vmin=-1.5, vmax=3., cmap='viridis')
gc2.hide_axis_labels()
gc2.hide_tick_labels()
plt.title('Deconvolved Image')
gc2.add_colorbar()
fig.canvas.draw()
#load deconvolved image
fh = fits.open('../data/fits/deconv/KAT-7_6h60s_dec-30_10MHz_10chans_uniform_n100-image.fits')
deconvImg = fh[0].data
#load residual image
fh = fits.open('../data/fits/deconv/KAT-7_6h60s_dec-30_10MHz_10chans_uniform_n100-residual.fits')
residImg = fh[0].data
peakI = np.max(deconvImg)
print 'Peak Flux: %f Jy'%(peakI)
print 'Dynamic Range:'
#method 1
noise = np.std(deconvImg)
print '\tMethod 1:', peakI/noise
#method 2
noise = np.std(residImg)
print '\tMethod 2:', peakI/noise
#method 3
noise = np.std(np.random.choice(deconvImg.flatten(), int(deconvImg.size*.01))) #randomly sample 1% of pixels
print '\tMethod 3:', peakI/noise
#method 4, region 1
noise = np.std(deconvImg[0,0,0:128,0:128]) #corner of image
print '\tMethod 4a:', peakI/noise
#method 4, region 2
noise = np.std(deconvImg[0,0,192:320,192:320]) #centre of image
print '\tMethod 4b:', peakI/noise
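# Hedged extra estimate (not one of the notebook's four methods): a robust noise figure
# from the median absolute deviation of the residual image, which is less biased by
# leftover source flux than a plain standard deviation.
mad = np.median(np.abs(residImg - np.median(residImg)))
print('\tMAD-based: %f' % (peakI/(1.4826*mad)))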
fig = plt.figure(figsize=(8, 7))
gc1 = aplpy.FITSFigure('../data/fits/deconv/KAT-7_6h60s_dec-30_10MHz_10chans_uniform_n100-residual.fits', \
figure=fig)
gc1.show_colorscale(vmin=-1.5, vmax=3., cmap='viridis')
gc1.hide_axis_labels()
gc1.hide_tick_labels()
plt.title('Residual Image')
gc1.add_colorbar()
fig.canvas.draw()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import section specific modules
Step5: 6.4 Residuals and Image Quality
Step6: Figure
Step7: Left
Step8: Method 1 will always result in a lower dynamic range than Method 2, as the deconvolved image includes the sources whereas Method 2 only uses the residuals. Method 3 will result in a dynamic range which varies depending on the number of pixels sampled and which pixels are sampled. One could imagine an unlucky sampling where every pixel chosen is part of a source, resulting in a large standard deviation. Method 4 depends on the region used to compute the noise. In the Method 4a result a corner of the image, where there are essentially no sources, results in a high dynamic range. On the other hand, choosing the centre region to compute the noise standard deviation results in a low dynamic range. This variation between methods can lead to people playing 'the dynamic range game' where someone can pick the result that best fits what they want to say about the image. Be careful, and make sure your dynamic range metric is well defined and unbiased.
|
13,243
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
from halomod import HaloModel
from scipy.interpolate import InterpolatedUnivariateSpline as spline
hm = HaloModel(profile_model="Einasto")
_ = hm.profile.rho(hm.r,hm.m)
hm.update(profile_model="Einasto")
plot(hm.r, hm.profile.rho(hm.r,1e12),label="m=12",color="b")
plot(hm.r, hm.profile.rho(hm.r,1e14),label="m=14",color="r")
plot(hm.r, hm.profile.rho(hm.r,1e16),label="m=16",color="g")
hm.update(profile_model="NFW")
plot(hm.r, hm.profile.rho(hm.r,1e12),label="m=12",ls="--",color="b")
plot(hm.r, hm.profile.rho(hm.r,1e14),label="m=14",ls="--",color="r")
plot(hm.r, hm.profile.rho(hm.r,1e16),label="m=16",ls="--",color="g")
legend(loc=0)
xscale('log')
yscale('log')
show()
hm.update(profile_model="Einasto")
plot(hm.m, hm.profile.rho(0.01,hm.m),label="r=0.01",color="b")
plot(hm.m, hm.profile.rho(0.1,hm.m),label="r=0.1",color="r")
plot(hm.m, hm.profile.rho(1.0,hm.m),label="r=1",color="g")
hm.update(profile_model="NFW")
plot(hm.m, hm.profile.rho(0.01,hm.m),ls="--",color="b")
plot(hm.m, hm.profile.rho(0.1,hm.m),ls="--",color="r")
plot(hm.m, hm.profile.rho(1.0,hm.m),ls="--",color="g")
legend(loc=0)
xscale('log')
yscale('log')
show()
hm.update(profile_model="Einasto")
#plot(hm.k, hm.profile.u(hm.k,1e12),label="m=12",color="b")
#print hm.profile.u(hm.k,1e12)
#plot(hm.k, hm.profile.u(hm.k,1e14),label="m=14",color="r")
#plot(hm.k, hm.profile.u(hm.k,1e16),label="m=16",color="g")
plot(hm.k, hm.profile.u(hm.k,1e12),label="m=12",color="b")
#print hm.profile.u(hm.k,1e12)
plot(hm.k, hm.profile.u(hm.k,1e14),label="m=14",color="r")
plot(hm.k, hm.profile.u(hm.k,1e16),label="m=16",color="g")
hm.update(profile_model="NFW")
plot(hm.k, hm.profile.u(hm.k,1e12),label="m=12",ls="--",color="b")
plot(hm.k, hm.profile.u(hm.k,1e14),label="m=14",ls="--",color="r")
plot(hm.k, hm.profile.u(hm.k,1e16),label="m=16",ls="--",color="g")
hm.update(profile_model="Einasto")
legend(loc=0)
xscale('log')
yscale('log')
show()
hm.update(profile_model="Einasto")
plot(hm.m, hm.profile.u(0.01,hm.m),label="k=0.01",color="b")
plot(hm.m, hm.profile.u(5,hm.m),label="k=5",color="r")
plot(hm.m, hm.profile.u(1000,hm.m),label="k=1000",color="g")
hm.update(profile_model="NFW")
plot(hm.m, hm.profile.u(0.01,hm.m),ls="--",color="b")
plot(hm.m, hm.profile.u(5,hm.m),ls="--",color="r")
plot(hm.m, hm.profile.u(1000,hm.m),ls="--",color="g")
legend(loc=0)
xscale('log')
yscale('log')
show()
hm.update(profile_model="Einasto")
%timeit hm.profile.u(hm.k,hm.m)
hm.update(profile_model="NFW")
%timeit hm.profile.u(hm.k,hm.m)
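# (added note) hm.profile with profile_model="Einasto" follows the Einasto shape
# rho(r) ~ exp(-(2/alpha)*((r/r_-2)**alpha - 1)); f(x) below is that same (unnormalised)
# shape with alpha = 0.18, and _p(K, c) evaluates its truncated Fourier transform u(K|c)
# by direct numerical integration so the result can be cached and interpolated later.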
def f(x,a=0.18):
return np.exp((-2/a)*(x**a-1))
def _p(K, c):
minsteps = 1000
res = np.zeros((len(K),len(c)))
for ik, kappa in enumerate(K):
smallest_period = np.pi / kappa
dx = smallest_period / 8
nsteps = max(int(np.ceil(c.max() / dx)),minsteps)
x, dx = np.linspace(0, c.max(), nsteps, retstep=True)
spl = spline(x, x*f(x)*np.sin(kappa*x)/kappa)
intg = spl.antiderivative()
res[ik,:] = intg(c) - intg(0)
return np.clip(res,0,None)
K = np.logspace(-4,4,500)
c = np.logspace(0,2,1000)
pk = _p(K,c)
#plot(K,pk)
#xscale('log')
#yscale('log')
np.savez("uKc_einasto.npz",pk=pk,K=K,c=c)
from scipy.interpolate import RectBivariateSpline
def _newp(K,c):
data = np.load("uKc_einasto.npz")
pk = data['pk']
_k = data['K']
_c = data['c']
c = np.atleast_1d(c)
if np.isscalar(K):
K = np.atleast_2d(K)
if K.ndim < 2:
if len(K)!=len(c):
K = np.atleast_2d(K).T # should be len(rs) x len(k)
else:
K = np.atleast_2d(K)
pk[pk<=0] = 1e-8
spl = RectBivariateSpline(np.log(_k),np.log(_c),np.log(pk))
cc = np.repeat(c,K.shape[0])
return np.exp(hm.profile._reduce(spl.ev(np.log(K.flatten()),np.log(cc)).reshape(K.shape)))
c,K = hm.profile._get_k_variables(hm.k,hm.m)
%timeit _newp(K,c)
plot(np.logspace(-4,4,500),pk[:,0]/ _newp(np.logspace(-4,4,500),1))
plot(np.logspace(-4,4,500),_p(np.logspace(-4,4,500),np.atleast_1d(1)).flatten()/_newp(np.logspace(-4,4,500),1))
plot(np.logspace(-4,4,500),pk[:,0]/ _p(np.logspace(-4,4,500),np.atleast_1d(1)).flatten())
plot()
xscale('log')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Density Profile
Step2: Now plot versus $r$
Step3: Now plot versus $m$
Step4: Fourier Transform
Step5: Now plot against $m$
Step6: We may have to be a bit wary of the high-mass, high-k tail, but other than that we should be okay. Finally, to make sure things run properly, try the full matrix
Step7: Perhaps it's better to pre-cache results.
|
13,244
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, SkewT
from metpy.units import units
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('may4_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = mpcalc.wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.wind_components(wind_speed, wind_dir)
fig = plt.figure(figsize=(9, 9))
add_metpy_logo(fig, 115, 100)
skew = SkewT(fig, rotation=45)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Calculate LCL height and plot as black dot
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Calculate full parcel profile and add to plot as black line
prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
skew.plot(p, prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, prof)
skew.shade_cape(p, T, prof)
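# (Added sketch, not part of the original example.) MetPy can also report the CAPE and CIN
# values for this surface-based parcel directly:
cape, cin = mpcalc.cape_cin(p, T, Td, prof)
print('CAPE: {}, CIN: {}'.format(cape, cin))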
# An example of a slanted line at constant T -- in this case the 0
# isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Show the plot
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Upper air data can be obtained using the siphon package, but for this example we will use
Step2: We will pull the data out of the example dataset into individual variables and
Step3: Create a new figure. The dimensions here give a good aspect ratio.
|
13,245
|
<ASSISTANT_TASK:>
Python Code:
# initialize hod model
model = PrebuiltHodModelFactory('zheng07', threshold=-21)
halocat = CachedHaloCatalog(simname='multidark', redshift=0, halo_finder='rockstar')
model.populate_mock(halocat, enforce_PBC=False)
N_sat = len(np.where(model.mock.galaxy_table['gal_type'] == 'satellites')[0])
N_gal = len(model.mock.galaxy_table['gal_type'])
print 'f_sat = ', np.float(N_sat)/np.float(N_gal)
sub_model = PrebuiltHodModelFactory('zheng07', threshold=-21)
sub_model.new_haloprop_func_dict = {'sim_subvol': util.mk_id_column}
sub_halocat = CachedHaloCatalog(simname = 'multidark', redshift = 0, halo_finder = 'rockstar')
for rint in range(10):
simsubvol = lambda x: util.mask_func(x, rint)
sub_model.populate_mock(sub_halocat, masking_function=simsubvol, enforce_PBC=False)
sub_N_sat = len(np.where(sub_model.mock.galaxy_table['gal_type'] == 'satellites')[0])
sub_N_gal = len(sub_model.mock.galaxy_table['gal_type'])
print 'f_sat = ', np.float(sub_N_sat)/np.float(sub_N_gal)
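# (added note) to quantify the subvolume-to-subvolume scatter, one could append
# float(sub_N_sat)/float(sub_N_gal) to a list inside the loop above and then report
# np.mean(...) and np.std(...) of that list after the loop finishes.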
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note that changing the periodic-boundary-condition option enforce_PBC does not change the $f_{sat}$ value.
|
13,246
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries
import numpy as np
import pandas as pd
import pandas_datareader as data
import matplotlib.pyplot as plt
%matplotlib inline
# Create the function
def load_adj_close(ticker, data_source, start_date, end_date):
panel_data = data.DataReader(ticker, data_source, start_date, end_date)
closes = panel_data.loc['Adj Close']
all_weekdays = pd.date_range(start=start_date, end=end_date)
closes = closes.reindex(all_weekdays)
closes = closes.fillna(method='ffill')
return closes
# Download NVDA data for 2016
ticker = ['NVDA']
data_source = 'yahoo'
start_date = '2016-01-04'
end_date = '2016-12-31'
closes = load_adj_close(ticker, data_source, start_date, end_date)
# Plot the closing prices
closes.plot(figsize=(8,6));
# Function that returns the daily returns, their mean and standard deviation
def mu_std_daily_ret(closes):
daily_returns = (np.log(closes/closes.shift(1)))[1:]
mu = daily_returns.mean().values[0]
sigma = daily_returns.std().values[0]
return daily_returns, mu, sigma
# Compute them with the previous function
daily_returns, mu, sigma = mu_std_daily_ret(closes)
mu, sigma
# Plot the daily returns
daily_returns.plot(figsize=(8,6));
# Function that simulates several daily-return scenarios
def daily_ret_sim(mu, sigma, ndays, ntraj, start_date):
dates = pd.date_range(start=start_date,periods=ndays)
return pd.DataFrame(sigma*np.random.randn(ndays, ntraj)+mu, index = dates)
# Simulate 100 scenarios for all of 2017
ndays = 360
ntraj = 100
start_date = '2017-01-01'
simret = daily_ret_sim(mu, sigma, ndays, ntraj, start_date)
# Price projection function
def closes_proj(simret, closes):
return (closes.iloc[-1,:].values[0])*np.exp(simret.cumsum())
# Project prices and concatenate with the 2016 prices
simdata = closes_proj(simret, closes)
simdata
# Plot
simdata.plot(figsize=(8,6),legend=False);
K = 200
dates = pd.date_range('20170101',periods=ndays)
strike = pd.DataFrame({'Strike':K*np.ones(ndays)},index=dates)
simul = pd.concat([closes.T,simdata.T,strike.T]).T
simul.plot(figsize=(8,6),legend=False);
strike = pd.DataFrame(K*np.ones(ndays*ntraj).reshape((ndays,ntraj)),index=dates)
count = simdata>strike
prob = count.T.sum()/ntraj
prob.plot(figsize=(8,6),legend=False);
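# (added) the estimated probability of finishing the year above the threshold K is simply the
# last value of the exceedance series computed above
print('P(S_T > K) ~= %.2f' % prob.iloc[-1])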
((K-closes.iloc[-1,:])/closes.iloc[-1,:]).values
closes.iloc[-1,:]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Projection of daily returns
Step2: Having characterized the daily returns as a normal random variable with the sample mean and variance obtained from the 2016 data, we can generate random numbers with these characteristics to simulate the behaviour of the closing stock prices in 2017 (under the assumption that nothing changes fundamentally).
Step3: 3. Projection of closing prices
Step4: 4. Price-threshold probability
|
13,247
|
<ASSISTANT_TASK:>
Python Code:
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "classification"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
def sort_by_target(mnist):
reorder_train = np.array(sorted([(target, i) for i, target in enumerate(mnist.target[:60000])]))[:, 1]
reorder_test = np.array(sorted([(target, i) for i, target in enumerate(mnist.target[60000:])]))[:, 1]
mnist.data[:60000] = mnist.data[reorder_train]
mnist.target[:60000] = mnist.target[reorder_train]
mnist.data[60000:] = mnist.data[reorder_test + 60000]
mnist.target[60000:] = mnist.target[reorder_test + 60000]
try:
from sklearn.datasets import fetch_openml
mnist = fetch_openml('mnist_784', version=1, cache=True, as_frame=False)
mnist.target = mnist.target.astype(np.int8) # fetch_openml() returns targets as strings
sort_by_target(mnist) # fetch_openml() returns an unsorted dataset
except ImportError:
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata('MNIST original')
mnist["data"], mnist["target"]
mnist.data.shape
X, y = mnist["data"], mnist["target"]
X.shape
y.shape
28*28
some_digit = X[36000]
some_digit_image = some_digit.reshape(28, 28)
plt.imshow(some_digit_image, cmap = mpl.cm.binary,
interpolation="nearest")
plt.axis("off")
save_fig("some_digit_plot")
plt.show()
def plot_digit(data):
image = data.reshape(28, 28)
plt.imshow(image, cmap = mpl.cm.binary,
interpolation="nearest")
plt.axis("off")
# EXTRA
def plot_digits(instances, images_per_row=10, **options):
size = 28
images_per_row = min(len(instances), images_per_row)
images = [instance.reshape(size,size) for instance in instances]
n_rows = (len(instances) - 1) // images_per_row + 1
row_images = []
n_empty = n_rows * images_per_row - len(instances)
images.append(np.zeros((size, size * n_empty)))
for row in range(n_rows):
rimages = images[row * images_per_row : (row + 1) * images_per_row]
row_images.append(np.concatenate(rimages, axis=1))
image = np.concatenate(row_images, axis=0)
plt.imshow(image, cmap = mpl.cm.binary, **options)
plt.axis("off")
plt.figure(figsize=(9,9))
example_images = np.r_[X[:12000:600], X[13000:30600:600], X[30600:60000:590]]
plot_digits(example_images, images_per_row=10)
save_fig("more_digits_plot")
plt.show()
y[36000]
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
import numpy as np
shuffle_index = np.random.permutation(60000)
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
y_train_5 = (y_train == 5)
y_test_5 = (y_test == 5)
from sklearn.linear_model import SGDClassifier
sgd_clf = SGDClassifier(max_iter=5, tol=-np.infty, random_state=42)
sgd_clf.fit(X_train, y_train_5)
sgd_clf.predict([some_digit])
from sklearn.model_selection import cross_val_score
cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring="accuracy")
from sklearn.model_selection import StratifiedKFold
from sklearn.base import clone
skfolds = StratifiedKFold(n_splits=3, random_state=42, shuffle=True)
for train_index, test_index in skfolds.split(X_train, y_train_5):
clone_clf = clone(sgd_clf)
X_train_folds = X_train[train_index]
y_train_folds = (y_train_5[train_index])
X_test_fold = X_train[test_index]
y_test_fold = (y_train_5[test_index])
clone_clf.fit(X_train_folds, y_train_folds)
y_pred = clone_clf.predict(X_test_fold)
n_correct = sum(y_pred == y_test_fold)
print(n_correct / len(y_pred))
from sklearn.base import BaseEstimator
class Never5Classifier(BaseEstimator):
def fit(self, X, y=None):
pass
def predict(self, X):
return np.zeros((len(X), 1), dtype=bool)
never_5_clf = Never5Classifier()
cross_val_score(never_5_clf, X_train, y_train_5, cv=3, scoring="accuracy")
from sklearn.model_selection import cross_val_predict
y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)
from sklearn.metrics import confusion_matrix
confusion_matrix(y_train_5, y_train_pred)
y_train_perfect_predictions = y_train_5
confusion_matrix(y_train_5, y_train_perfect_predictions)
from sklearn.metrics import precision_score, recall_score
precision_score(y_train_5, y_train_pred)
4344 / (4344 + 1307)
recall_score(y_train_5, y_train_pred)
4344 / (4344 + 1077)
from sklearn.metrics import f1_score
f1_score(y_train_5, y_train_pred)
4344 / (4344 + (1077 + 1307)/2)
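# (added note) equivalently, the F1 score is the harmonic mean of precision and recall:
# F1 = 2 * precision * recall / (precision + recall)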
y_scores = sgd_clf.decision_function([some_digit])
y_scores
threshold = 0
y_some_digit_pred = (y_scores > threshold)
y_some_digit_pred
threshold = 200000
y_some_digit_pred = (y_scores > threshold)
y_some_digit_pred
y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3,
method="decision_function")
y_scores.shape
# hack to work around issue #9589 in Scikit-Learn 0.19.0
if y_scores.ndim == 2:
y_scores = y_scores[:, 1]
from sklearn.metrics import precision_recall_curve
precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
plt.plot(thresholds, precisions[:-1], "b--", label="Precision", linewidth=2)
plt.plot(thresholds, recalls[:-1], "g-", label="Recall", linewidth=2)
plt.xlabel("Threshold", fontsize=16)
plt.legend(loc="upper left", fontsize=16)
plt.ylim([0, 1])
plt.figure(figsize=(8, 4))
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.xlim([-700000, 700000])
save_fig("precision_recall_vs_threshold_plot")
plt.show()
(y_train_pred == (y_scores > 0)).all()
y_train_pred_90 = (y_scores > 70000)
precision_score(y_train_5, y_train_pred_90)
recall_score(y_train_5, y_train_pred_90)
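# (added sketch, not in the original) instead of reading the threshold off the plot, the lowest
# threshold that reaches 90% precision can be looked up directly from the curve arrays:
threshold_90_precision = thresholds[np.argmax(precisions >= 0.90)]
y_train_pred_90p = (y_scores >= threshold_90_precision)
print(precision_score(y_train_5, y_train_pred_90p), recall_score(y_train_5, y_train_pred_90p))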
def plot_precision_vs_recall(precisions, recalls):
plt.plot(recalls, precisions, "b-", linewidth=2)
plt.xlabel("Recall", fontsize=16)
plt.ylabel("Precision", fontsize=16)
plt.axis([0, 1, 0, 1])
plt.figure(figsize=(8, 6))
plot_precision_vs_recall(precisions, recalls)
save_fig("precision_vs_recall_plot")
plt.show()
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_train_5, y_scores)
def plot_roc_curve(fpr, tpr, label=None):
plt.plot(fpr, tpr, linewidth=2, label=label)
plt.plot([0, 1], [0, 1], 'k--')
plt.axis([0, 1, 0, 1])
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
plt.figure(figsize=(8, 6))
plot_roc_curve(fpr, tpr)
save_fig("roc_curve_plot")
plt.show()
from sklearn.metrics import roc_auc_score
roc_auc_score(y_train_5, y_scores)
from sklearn.ensemble import RandomForestClassifier
forest_clf = RandomForestClassifier(n_estimators=10, random_state=42)
y_probas_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3,
method="predict_proba")
y_scores_forest = y_probas_forest[:, 1] # score = proba of positive class
fpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train_5,y_scores_forest)
plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, "b:", linewidth=2, label="SGD")
plot_roc_curve(fpr_forest, tpr_forest, "Random Forest")
plt.legend(loc="lower right", fontsize=16)
save_fig("roc_curve_comparison_plot")
plt.show()
roc_auc_score(y_train_5, y_scores_forest)
y_train_pred_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3)
precision_score(y_train_5, y_train_pred_forest)
recall_score(y_train_5, y_train_pred_forest)
sgd_clf.fit(X_train, y_train)
sgd_clf.predict([some_digit])
some_digit_scores = sgd_clf.decision_function([some_digit])
some_digit_scores
np.argmax(some_digit_scores)
sgd_clf.classes_
sgd_clf.classes_[5]
from sklearn.multiclass import OneVsOneClassifier
ovo_clf = OneVsOneClassifier(SGDClassifier(max_iter=5, tol=-np.infty, random_state=42))
ovo_clf.fit(X_train, y_train)
ovo_clf.predict([some_digit])
len(ovo_clf.estimators_)
forest_clf.fit(X_train, y_train)
forest_clf.predict([some_digit])
forest_clf.predict_proba([some_digit])
cross_val_score(sgd_clf, X_train, y_train, cv=3, scoring="accuracy")
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train.astype(np.float64))
cross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring="accuracy")
y_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)
conf_mx = confusion_matrix(y_train, y_train_pred)
conf_mx
def plot_confusion_matrix(matrix):
"""If you prefer color and a colorbar."""
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
cax = ax.matshow(matrix)
fig.colorbar(cax)
plt.matshow(conf_mx, cmap=plt.cm.gray)
save_fig("confusion_matrix_plot", tight_layout=False)
plt.show()
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
save_fig("confusion_matrix_errors_plot", tight_layout=False)
plt.show()
cl_a, cl_b = 3, 5
X_aa = X_train[(y_train == cl_a) & (y_train_pred == cl_a)]
X_ab = X_train[(y_train == cl_a) & (y_train_pred == cl_b)]
X_ba = X_train[(y_train == cl_b) & (y_train_pred == cl_a)]
X_bb = X_train[(y_train == cl_b) & (y_train_pred == cl_b)]
plt.figure(figsize=(8,8))
plt.subplot(221); plot_digits(X_aa[:25], images_per_row=5)
plt.subplot(222); plot_digits(X_ab[:25], images_per_row=5)
plt.subplot(223); plot_digits(X_ba[:25], images_per_row=5)
plt.subplot(224); plot_digits(X_bb[:25], images_per_row=5)
save_fig("error_analysis_digits_plot")
plt.show()
from sklearn.neighbors import KNeighborsClassifier
y_train_large = (y_train >= 7)
y_train_odd = (y_train % 2 == 1)
y_multilabel = np.c_[y_train_large, y_train_odd]
knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train, y_multilabel)
knn_clf.predict([some_digit])
y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_multilabel, cv=3, n_jobs=-1)
f1_score(y_multilabel, y_train_knn_pred, average="macro")
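# (added note) average="macro" assumes all labels are equally important; to weight each label by
# its support one could instead use:
# f1_score(y_multilabel, y_train_knn_pred, average="weighted")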
noise = np.random.randint(0, 100, (len(X_train), 784))
X_train_mod = X_train + noise
noise = np.random.randint(0, 100, (len(X_test), 784))
X_test_mod = X_test + noise
y_train_mod = X_train
y_test_mod = X_test
some_index = 5500
plt.subplot(121); plot_digit(X_test_mod[some_index])
plt.subplot(122); plot_digit(y_test_mod[some_index])
save_fig("noisy_digit_example_plot")
plt.show()
knn_clf.fit(X_train_mod, y_train_mod)
clean_digit = knn_clf.predict([X_test_mod[some_index]])
plot_digit(clean_digit)
save_fig("cleaned_digit_example_plot")
from sklearn.dummy import DummyClassifier
dmy_clf = DummyClassifier()
y_probas_dmy = cross_val_predict(dmy_clf, X_train, y_train_5, cv=3, method="predict_proba")
y_scores_dmy = y_probas_dmy[:, 1]
fprr, tprr, thresholdsr = roc_curve(y_train_5, y_scores_dmy)
plot_roc_curve(fprr, tprr)
from sklearn.neighbors import KNeighborsClassifier
knn_clf = KNeighborsClassifier(n_jobs=-1, weights='distance', n_neighbors=4)
knn_clf.fit(X_train, y_train)
y_knn_pred = knn_clf.predict(X_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_knn_pred)
from scipy.ndimage.interpolation import shift
def shift_digit(digit_array, dx, dy, new=0):
return shift(digit_array.reshape(28, 28), [dy, dx], cval=new).reshape(784)
plot_digit(shift_digit(some_digit, 5, 1, new=100))
X_train_expanded = [X_train]
y_train_expanded = [y_train]
for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
shifted_images = np.apply_along_axis(shift_digit, axis=1, arr=X_train, dx=dx, dy=dy)
X_train_expanded.append(shifted_images)
y_train_expanded.append(y_train)
X_train_expanded = np.concatenate(X_train_expanded)
y_train_expanded = np.concatenate(y_train_expanded)
X_train_expanded.shape, y_train_expanded.shape
knn_clf.fit(X_train_expanded, y_train_expanded)
y_knn_expanded_pred = knn_clf.predict(X_test)
accuracy_score(y_test, y_knn_expanded_pred)
ambiguous_digit = X_test[2589]
knn_clf.predict_proba([ambiguous_digit])
plot_digit(ambiguous_digit)
from sklearn.model_selection import GridSearchCV
param_grid = [{'weights': ["uniform", "distance"], 'n_neighbors': [3, 4, 5]}]
knn_clf = KNeighborsClassifier()
grid_search = GridSearchCV(knn_clf, param_grid, cv=5, verbose=3, n_jobs=-1)
grid_search.fit(X_train, y_train)
grid_search.best_params_
grid_search.best_score_
from sklearn.metrics import accuracy_score
y_pred = grid_search.predict(X_test)
accuracy_score(y_test, y_pred)
from scipy.ndimage.interpolation import shift
def shift_image(image, dx, dy):
image = image.reshape((28, 28))
shifted_image = shift(image, [dy, dx], cval=0, mode="constant")
return shifted_image.reshape([-1])
image = X_train[1000]
shifted_image_down = shift_image(image, 0, 5)
shifted_image_left = shift_image(image, -5, 0)
plt.figure(figsize=(12,3))
plt.subplot(131)
plt.title("Original", fontsize=14)
plt.imshow(image.reshape(28, 28), interpolation="nearest", cmap="Greys")
plt.subplot(132)
plt.title("Shifted down", fontsize=14)
plt.imshow(shifted_image_down.reshape(28, 28), interpolation="nearest", cmap="Greys")
plt.subplot(133)
plt.title("Shifted left", fontsize=14)
plt.imshow(shifted_image_left.reshape(28, 28), interpolation="nearest", cmap="Greys")
plt.show()
X_train_augmented = [image for image in X_train]
y_train_augmented = [label for label in y_train]
for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
for image, label in zip(X_train, y_train):
X_train_augmented.append(shift_image(image, dx, dy))
y_train_augmented.append(label)
X_train_augmented = np.array(X_train_augmented)
y_train_augmented = np.array(y_train_augmented)
shuffle_idx = np.random.permutation(len(X_train_augmented))
X_train_augmented = X_train_augmented[shuffle_idx]
y_train_augmented = y_train_augmented[shuffle_idx]
knn_clf = KNeighborsClassifier(**grid_search.best_params_)
knn_clf.fit(X_train_augmented, y_train_augmented)
y_pred = knn_clf.predict(X_test)
accuracy_score(y_test, y_pred)
import os
TITANIC_PATH = os.path.join("datasets", "titanic")
import pandas as pd
def load_titanic_data(filename, titanic_path=TITANIC_PATH):
csv_path = os.path.join(titanic_path, filename)
return pd.read_csv(csv_path)
train_data = load_titanic_data("train.csv")
test_data = load_titanic_data("test.csv")
train_data.head()
train_data.info()
train_data.describe()
train_data["Survived"].value_counts()
train_data["Pclass"].value_counts()
train_data["Sex"].value_counts()
train_data["Embarked"].value_counts()
from sklearn.base import BaseEstimator, TransformerMixin
# A class to select numerical or categorical columns
# since Scikit-Learn doesn't handle DataFrames yet
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names]
from sklearn.pipeline import Pipeline
try:
from sklearn.impute import SimpleImputer # Scikit-Learn 0.20+
except ImportError:
from sklearn.preprocessing import Imputer as SimpleImputer
num_pipeline = Pipeline([
("select_numeric", DataFrameSelector(["Age", "SibSp", "Parch", "Fare"])),
("imputer", SimpleImputer(strategy="median")),
])
num_pipeline.fit_transform(train_data)
# Inspired from stackoverflow.com/questions/25239958
class MostFrequentImputer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
self.most_frequent_ = pd.Series([X[c].value_counts().index[0] for c in X],
index=X.columns)
return self
def transform(self, X, y=None):
return X.fillna(self.most_frequent_)
try:
from sklearn.preprocessing import OrdinalEncoder # just to raise an ImportError if Scikit-Learn < 0.20
from sklearn.preprocessing import OneHotEncoder
except ImportError:
from future_encoders import OneHotEncoder # Scikit-Learn < 0.20
cat_pipeline = Pipeline([
("select_cat", DataFrameSelector(["Pclass", "Sex", "Embarked"])),
("imputer", MostFrequentImputer()),
("cat_encoder", OneHotEncoder(sparse=False)),
])
cat_pipeline.fit_transform(train_data)
from sklearn.pipeline import FeatureUnion
preprocess_pipeline = FeatureUnion(transformer_list=[
("num_pipeline", num_pipeline),
("cat_pipeline", cat_pipeline),
])
X_train = preprocess_pipeline.fit_transform(train_data)
X_train
y_train = train_data["Survived"]
from sklearn.svm import SVC
svm_clf = SVC(gamma="auto")
svm_clf.fit(X_train, y_train)
X_test = preprocess_pipeline.transform(test_data)
y_pred = svm_clf.predict(X_test)
from sklearn.model_selection import cross_val_score
svm_scores = cross_val_score(svm_clf, X_train, y_train, cv=10)
svm_scores.mean()
from sklearn.ensemble import RandomForestClassifier
forest_clf = RandomForestClassifier(n_estimators=100, random_state=42)
forest_scores = cross_val_score(forest_clf, X_train, y_train, cv=10)
forest_scores.mean()
plt.figure(figsize=(8, 4))
plt.plot([1]*10, svm_scores, ".")
plt.plot([2]*10, forest_scores, ".")
plt.boxplot([svm_scores, forest_scores], labels=("SVM","Random Forest"))
plt.ylabel("Accuracy", fontsize=14)
plt.show()
train_data["AgeBucket"] = train_data["Age"] // 15 * 15
train_data[["AgeBucket", "Survived"]].groupby(['AgeBucket']).mean()
train_data["RelativesOnboard"] = train_data["SibSp"] + train_data["Parch"]
train_data[["RelativesOnboard", "Survived"]].groupby(['RelativesOnboard']).mean()
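# (added sketch, not in the original) to actually feed these engineered features to a model, the
# same columns would have to be added to the test set and selected in the numerical pipeline, e.g.:
# test_data["AgeBucket"] = test_data["Age"] // 15 * 15
# test_data["RelativesOnboard"] = test_data["SibSp"] + test_data["Parch"]
# num_pipeline = Pipeline([
#     ("select_numeric", DataFrameSelector(["Age", "SibSp", "Parch", "Fare",
#                                           "AgeBucket", "RelativesOnboard"])),
#     ("imputer", SimpleImputer(strategy="median")),
# ])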
import os
import tarfile
import urllib.request
DOWNLOAD_ROOT = "http://spamassassin.apache.org/old/publiccorpus/"
HAM_URL = DOWNLOAD_ROOT + "20030228_easy_ham.tar.bz2"
SPAM_URL = DOWNLOAD_ROOT + "20030228_spam.tar.bz2"
SPAM_PATH = os.path.join("datasets", "spam")
def fetch_spam_data(spam_url=SPAM_URL, spam_path=SPAM_PATH):
if not os.path.isdir(spam_path):
os.makedirs(spam_path)
for filename, url in (("ham.tar.bz2", HAM_URL), ("spam.tar.bz2", SPAM_URL)):
path = os.path.join(spam_path, filename)
if not os.path.isfile(path):
urllib.request.urlretrieve(url, path)
tar_bz2_file = tarfile.open(path)
tar_bz2_file.extractall(path=SPAM_PATH)
tar_bz2_file.close()
fetch_spam_data()
HAM_DIR = os.path.join(SPAM_PATH, "easy_ham")
SPAM_DIR = os.path.join(SPAM_PATH, "spam")
ham_filenames = [name for name in sorted(os.listdir(HAM_DIR)) if len(name) > 20]
spam_filenames = [name for name in sorted(os.listdir(SPAM_DIR)) if len(name) > 20]
len(ham_filenames)
len(spam_filenames)
import email
import email.policy
def load_email(is_spam, filename, spam_path=SPAM_PATH):
directory = "spam" if is_spam else "easy_ham"
with open(os.path.join(spam_path, directory, filename), "rb") as f:
return email.parser.BytesParser(policy=email.policy.default).parse(f)
ham_emails = [load_email(is_spam=False, filename=name) for name in ham_filenames]
spam_emails = [load_email(is_spam=True, filename=name) for name in spam_filenames]
print(ham_emails[1].get_content().strip())
print(spam_emails[6].get_content().strip())
def get_email_structure(email):
if isinstance(email, str):
return email
payload = email.get_payload()
if isinstance(payload, list):
return "multipart({})".format(", ".join([
get_email_structure(sub_email)
for sub_email in payload
]))
else:
return email.get_content_type()
from collections import Counter
def structures_counter(emails):
structures = Counter()
for email in emails:
structure = get_email_structure(email)
structures[structure] += 1
return structures
structures_counter(ham_emails).most_common()
structures_counter(spam_emails).most_common()
for header, value in spam_emails[0].items():
print(header,":",value)
spam_emails[0]["Subject"]
import numpy as np
from sklearn.model_selection import train_test_split
X = np.array(ham_emails + spam_emails)
y = np.array([0] * len(ham_emails) + [1] * len(spam_emails))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
import re
from html import unescape
def html_to_plain_text(html):
text = re.sub('<head.*?>.*?</head>', '', html, flags=re.M | re.S | re.I)
text = re.sub('<a\s.*?>', ' HYPERLINK ', text, flags=re.M | re.S | re.I)
text = re.sub('<.*?>', '', text, flags=re.M | re.S)
text = re.sub(r'(\s*\n)+', '\n', text, flags=re.M | re.S)
return unescape(text)
html_spam_emails = [email for email in X_train[y_train==1]
if get_email_structure(email) == "text/html"]
sample_html_spam = html_spam_emails[7]
print(sample_html_spam.get_content().strip()[:1000], "...")
print(html_to_plain_text(sample_html_spam.get_content())[:1000], "...")
def email_to_text(email):
html = None
for part in email.walk():
ctype = part.get_content_type()
if not ctype in ("text/plain", "text/html"):
continue
try:
content = part.get_content()
except: # in case of encoding issues
content = str(part.get_payload())
if ctype == "text/plain":
return content
else:
html = content
if html:
return html_to_plain_text(html)
print(email_to_text(sample_html_spam)[:100], "...")
try:
import nltk
stemmer = nltk.PorterStemmer()
for word in ("Computations", "Computation", "Computing", "Computed", "Compute", "Compulsive"):
print(word, "=>", stemmer.stem(word))
except ImportError:
print("Error: stemming requires the NLTK module.")
stemmer = None
try:
import urlextract # may require an Internet connection to download root domain names
url_extractor = urlextract.URLExtract()
print(url_extractor.find_urls("Will it detect github.com and https://youtu.be/7Pq-S557XQU?t=3m32s"))
except ImportError:
print("Error: replacing URLs requires the urlextract module.")
url_extractor = None
from sklearn.base import BaseEstimator, TransformerMixin
class EmailToWordCounterTransformer(BaseEstimator, TransformerMixin):
def __init__(self, strip_headers=True, lower_case=True, remove_punctuation=True,
replace_urls=True, replace_numbers=True, stemming=True):
self.strip_headers = strip_headers
self.lower_case = lower_case
self.remove_punctuation = remove_punctuation
self.replace_urls = replace_urls
self.replace_numbers = replace_numbers
self.stemming = stemming
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X_transformed = []
for email in X:
text = email_to_text(email) or ""
if self.lower_case:
text = text.lower()
if self.replace_urls and url_extractor is not None:
urls = list(set(url_extractor.find_urls(text)))
urls.sort(key=lambda url: len(url), reverse=True)
for url in urls:
text = text.replace(url, " URL ")
if self.replace_numbers:
text = re.sub(r'\d+(?:\.\d*(?:[eE]\d+))?', 'NUMBER', text)
if self.remove_punctuation:
text = re.sub(r'\W+', ' ', text, flags=re.M)
word_counts = Counter(text.split())
if self.stemming and stemmer is not None:
stemmed_word_counts = Counter()
for word, count in word_counts.items():
stemmed_word = stemmer.stem(word)
stemmed_word_counts[stemmed_word] += count
word_counts = stemmed_word_counts
X_transformed.append(word_counts)
return np.array(X_transformed)
X_few = X_train[:3]
X_few_wordcounts = EmailToWordCounterTransformer().fit_transform(X_few)
X_few_wordcounts
from scipy.sparse import csr_matrix
class WordCounterToVectorTransformer(BaseEstimator, TransformerMixin):
def __init__(self, vocabulary_size=1000):
self.vocabulary_size = vocabulary_size
def fit(self, X, y=None):
total_count = Counter()
for word_count in X:
for word, count in word_count.items():
total_count[word] += min(count, 10)
most_common = total_count.most_common()[:self.vocabulary_size]
self.most_common_ = most_common
self.vocabulary_ = {word: index + 1 for index, (word, count) in enumerate(most_common)}
return self
def transform(self, X, y=None):
rows = []
cols = []
data = []
for row, word_count in enumerate(X):
for word, count in word_count.items():
rows.append(row)
cols.append(self.vocabulary_.get(word, 0))
data.append(count)
return csr_matrix((data, (rows, cols)), shape=(len(X), self.vocabulary_size + 1))
vocab_transformer = WordCounterToVectorTransformer(vocabulary_size=10)
X_few_vectors = vocab_transformer.fit_transform(X_few_wordcounts)
X_few_vectors
X_few_vectors.toarray()
vocab_transformer.vocabulary_
from sklearn.pipeline import Pipeline
preprocess_pipeline = Pipeline([
("email_to_wordcount", EmailToWordCounterTransformer()),
("wordcount_to_vector", WordCounterToVectorTransformer()),
])
X_train_transformed = preprocess_pipeline.fit_transform(X_train)
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
log_clf = LogisticRegression(solver="liblinear", random_state=42)
score = cross_val_score(log_clf, X_train_transformed, y_train, cv=3, verbose=3)
score.mean()
from sklearn.metrics import precision_score, recall_score
X_test_transformed = preprocess_pipeline.transform(X_test)
log_clf = LogisticRegression(solver="liblinear", random_state=42)
log_clf.fit(X_train_transformed, y_train)
y_pred = log_clf.predict(X_test_transformed)
print("Precision: {:.2f}%".format(100 * precision_score(y_test, y_pred)))
print("Recall: {:.2f}%".format(100 * recall_score(y_test, y_pred)))
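# (added) accuracy and F1 on the test set round out the picture:
from sklearn.metrics import accuracy_score, f1_score
print("Accuracy: {:.2f}%".format(100 * accuracy_score(y_test, y_pred)))
print("F1 score: {:.2f}%".format(100 * f1_score(y_test, y_pred)))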
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: MNIST
Step2: Binary classifier
Step3: Note
Step4: Note
Step5: ROC curves
Step6: Note
Step8: Multiclass classification
Step9: Multilabel classification
Step10: Warning
Step11: Multioutput classification
Step12: Extra material
Step13: KNN classifier
Step14: Exercise solutions
Step15: 2. Data Augmentation
Step16: By simply augmenting the data, we got a 0.5% accuracy boost.
Step17: The data is already split into a training set and a test set. However, the test data does not contain the labels
Step18: The attributes have the following meaning
Step19: Okay, the Age, Cabin and Embarked attributes are sometimes null (less than 891 non-null), especially the Cabin (77% are null). We will ignore the Cabin for now and focus on the rest. The Age attribute has about 19% null values, so we will need to decide what to do with them. Replacing null values with the median age seems reasonable.
Step20: Yikes, only 38% Survived.
Step21: Now let's take a quick look at all the categorical attributes
Step22: The Embarked attribute tells us where the passenger embarked
Step23: Let's build the pipeline for the numerical attributes
Step24: We will also need an imputer for the string categorical columns (the regular SimpleImputer does not work on those)
Step25: Warning
Step26: Now we can build the pipeline for the categorical attributes
Step27: Finally, let's join the numerical and categorical pipelines
Step28: Cool! Now we have a nice preprocessing pipeline that takes the raw data and outputs numerical input features that we can feed to any Machine Learning model we want.
Step29: Let's not forget to get the labels
Step30: We are now ready to train a classifier. Let's start with an SVC
Step31: Great, our model is trained, let's use it to make predictions on the test set
Step32: And now we could just build a CSV file with these predictions (respecting the format excepted by Kaggle), then upload it and hope for the best. But wait! We can do better than hope. Why don't we use cross-validation to have an idea of how good our model is?
Step33: Okay, over 73% accuracy, clearly better than random chance, but it's not a great score. Looking at the leaderboard for the Titanic competition on Kaggle, you can see that you need to reach above 80% accuracy to be within the top 10% Kagglers. Some reached 100%, but since you can easily find the list of victims of the Titanic, it seems likely that there was little Machine Learning involved in their performance! ;-) So let's try to build a model that reaches 80% accuracy.
Step34: That's much better!
Step35: To improve this result further, you could
Step36: 4. Spam classifier
Step37: Next, let's load all the emails
Step38: We can use Python's email module to parse these emails (this handles headers, encoding, and so on)
Step39: Let's look at one example of ham and one example of spam, to get a feel of what the data looks like
Step40: Some emails are actually multipart, with images and attachments (which can have their own attachments). Let's look at the various types of structures we have
Step41: It seems that the ham emails are more often plain text, while spam has quite a lot of HTML. Moreover, quite a few ham emails are signed using PGP, while no spam is. In short, it seems that the email structure is useful information to have.
Step42: There's probably a lot of useful information in there, such as the sender's email address (12a1mailbot1@web.de looks fishy), but we will just focus on the Subject header
Step43: Okay, before we learn too much about the data, let's not forget to split it into a training set and a test set
Step44: Okay, let's start writing the preprocessing functions. First, we will need a function to convert HTML to plain text. Arguably the best way to do this would be to use the great BeautifulSoup library, but I would like to avoid adding another dependency to this project, so let's hack a quick & dirty solution using regular expressions (at the risk of un̨ho͞ly radiańcé destro҉ying all enli̍̈́̂̈́ghtenment). The following function first drops the <head> section, then converts all <a> tags to the word HYPERLINK, then it gets rid of all HTML tags, leaving only the plain text. For readability, it also replaces multiple newlines with single newlines, and finally it unescapes html entities (such as &gt; or &nbsp;)
Step45: Let's see if it works. This is HTML spam
Step46: And this is the resulting plain text
Step47: Great! Now let's write a function that takes an email as input and returns its content as plain text, whatever its format is
Step48: Let's throw in some stemming! For this to work, you need to install the Natural Language Toolkit (NLTK). It's as simple as running the following command (don't forget to activate your virtualenv first; if you don't have one, you will likely need administrator rights, or use the --user option)
Step49: We will also need a way to replace URLs with the word "URL". For this, we could use hard core regular expressions but we will just use the urlextract library. You can install it with the following command (don't forget to activate your virtualenv first; if you don't have one, you will likely need administrator rights, or use the --user option)
Step50: We are ready to put all this together into a transformer that we will use to convert emails to word counters. Note that we split sentences into words using Python's split() method, which uses whitespaces for word boundaries. This works for many written languages, but not all. For example, Chinese and Japanese scripts generally don't use spaces between words, and Vietnamese often uses spaces even between syllables. It's okay in this exercise, because the dataset is (mostly) in English.
Step51: Let's try this transformer on a few emails
Step52: This looks about right!
Step53: What does this matrix mean? Well, the 64 in the third row, first column, means that the third email contains 64 words that are not part of the vocabulary. The 1 next to it means that the first word in the vocabulary is present once in this email. The 2 next to it means that the second word is present twice, and so on. You can look at the vocabulary to know which words we are talking about. The first word is "of", the second word is "and", etc.
Step54: We are now ready to train our first spam classifier! Let's transform the whole dataset
Step55: Over 98.7%, not bad for a first try!
|
13,248
|
<ASSISTANT_TASK:>
Python Code:
# check Python version
!python -V
import pandas as pd # download library to read data into dataframe
pd.set_option('display.max_columns', None)
recipes = pd.read_csv("https://ibm.box.com/shared/static/5wah9atr5o1akuuavl2z9tkjzdinr1lv.csv")
print("Data read into dataframe!") # takes about 30 seconds
recipes.head()
recipes.shape
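# (added) a quick look at the column dtypes helps orient further analysis
recipes.dtypes.head()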
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read the data from the IBM server into a pandas dataframe.
Step2: Show the first few rows.
Step3: Get the dimensions of the dataframe.
|
13,249
|
<ASSISTANT_TASK:>
Python Code:
utc = 0
sma = 1
ecc = 2
inc = 3
raan = 4
aop = 5
ma = 6
ta = 7
#fig1 = plt.figure(figsize = [15,8], facecolor='w')
fig_peri = plt.figure(figsize = [15,8], facecolor='w')
fig_apo = plt.figure(figsize = [15,8], facecolor='w')
fig3 = plt.figure(figsize = [15,8], facecolor='w')
fig4 = plt.figure(figsize = [15,8], facecolor='w')
fig4_rap = plt.figure(figsize = [15,8], facecolor='w')
fig5 = plt.figure(figsize = [15,8], facecolor='w')
fig6 = plt.figure(figsize = [15,8], facecolor='w')
#sub1 = fig1.add_subplot(111)
sub_peri = fig_peri.add_subplot(111)
sub_apo = fig_apo.add_subplot(111)
sub3 = fig3.add_subplot(111)
sub4 = fig4.add_subplot(111)
sub4_rap = fig4_rap.add_subplot(111)
sub5 = fig5.add_subplot(111)
sub6 = fig6.add_subplot(111)
subs = [sub_peri, sub_apo, sub3, sub4, sub4_rap, sub5, sub6]
for file in ['orbit_evolution.txt', 'orbit_evolution2.txt', 'orbit_evolution4.txt']:
orbit = load_orbit_file(file)
t = Time(mjd2unixtimestamp(orbit[:,utc]), format='unix')
#sub1.plot(t.datetime, orbit[:,sma])
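# (added note) the periapsis and apoapsis radii plotted below follow from the orbital
# elements as r_peri = a*(1 - e) and r_apo = a*(1 + e)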
sub_peri.plot(t.datetime, orbit[:,sma]*(1-orbit[:,ecc]))
sub_apo.plot(t.datetime, orbit[:,sma]*(1+orbit[:,ecc]))
sub3.plot(t.datetime, orbit[:,ecc])
sub4.plot(t.datetime, orbit[:,aop])
sub4_rap.plot(t.datetime, np.fmod(orbit[:,aop] + orbit[:,raan],360))
sub5.plot(t.datetime, orbit[:,inc])
sub6.plot(t.datetime, orbit[:,raan])
sub_peri.axhline(y = 1737, color='red')
month_locator = mdates.MonthLocator()
day_locator = mdates.DayLocator()
for sub in subs:
sub.set_xlabel('Time')
sub.xaxis.set_major_locator(month_locator)
sub.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
sub.xaxis.set_tick_params(rotation=45)
#sub1.set_ylabel('SMA (km)')
sub_peri.set_ylabel('Periapsis radius (km)')
sub_apo.set_ylabel('Apoapsis radius (km)')
sub3.set_ylabel('ECC')
sub4.set_ylabel('AOP (deg)')
sub4_rap.set_ylabel('RAOP (deg)')
sub5.set_ylabel('INC (deg)')
sub6.set_ylabel('RAAN (deg)')
#sub1.set_title('Semi-major axis')
sub_peri.set_title('Periapsis radius')
sub_apo.set_title('Apoapsis radius')
sub3.set_title('Eccentricity')
sub4.set_title('Argument of periapsis')
sub4_rap.set_title('Right ascension of periapsis')
sub5.set_title('Inclination')
sub6.set_title('Right ascension of ascending node')
for sub in subs:
sub.legend(['Without periapsis raise', 'With periapsis raise', 'Collision trajectory'])
sub_peri.legend(['Without periapsis raise', 'With periapsis raise', 'Collision trajectory', 'Lunar radius']);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot the orbital parameters which vary significantly between the different tracking files.
|
13,250
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import bokeh.plotting as bkp
from mpl_toolkits.axes_grid1 import make_axes_locatable
# read in readmissions data provided
hospital_read_df = pd.read_csv('data/cms_hospital_readmissions.csv')
hospital_read_df.head()
# deal with missing and inconvenient portions of data
clean_hospital_read_df = hospital_read_df[hospital_read_df['Number of Discharges'] != 'Not Available']
clean_hospital_read_df.loc[:,'Number of Discharges'] = clean_hospital_read_df['Number of Discharges'].astype(int)
clean_hospital_read_df = clean_hospital_read_df.sort_values('Number of Discharges')
# generate a scatterplot for number of discharges vs. excess rate of readmissions
# lists work better with matplotlib scatterplot function
x = [a for a in clean_hospital_read_df['Number of Discharges'][81:-3]]
y = list(clean_hospital_read_df['Excess Readmission Ratio'][81:-3])
fig, ax = plt.subplots(figsize=(8,5))
ax.scatter(x, y,alpha=0.2)
#ax.plot(x,y)
ax.fill_between([0,350], 1.15, 2, facecolor='red', alpha = .15, interpolate=True)
ax.fill_between([800,2500], .5, .95, facecolor='green', alpha = .15, interpolate=True)
ax.set_xlim([0, max(x)])
ax.set_xlabel('Number of discharges', fontsize=12)
ax.set_ylabel('Excess rate of readmissions', fontsize=12)
ax.set_title('Scatterplot of number of discharges vs. excess rate of readmissions', fontsize=14)
ax.grid(True)
fig.tight_layout()
hospital_cap_small= clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'].astype(int)<100]
hospital_cap_small = hospital_cap_small[hospital_cap_small['Number of Discharges'].astype(int) != 0]
hospital_small=sorted(hospital_cap_small['Excess Readmission Ratio'])
hospital_cap_big= clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'].astype(int)>1000]
hospital_cap_big = hospital_cap_big[hospital_cap_big['Number of Discharges'].astype(int) != 0]
hospital_big=sorted(hospital_cap_big['Excess Readmission Ratio'])
from scipy import stats
statistic = stats.f_oneway(hospital_small,hospital_big)
statistic
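# (added sketch) a simple reading of the one-way ANOVA result at the 1% significance level
f_stat, p_value = statistic
if p_value < 0.01:
    print('Reject the null: mean excess readmission ratios differ between small and large hospitals.')
else:
    print('Fail to reject the null hypothesis at the 1% level.')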
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preliminary Analysis
Step2: Preliminary Report
|
13,251
|
<ASSISTANT_TASK:>
Python Code:
import sqlite3 as sql
import os
from pprint import pprint
class DB:
backend = 'sqlite3' # default
target_path = os.getcwd() # current directory
db_name = ":file:" # lets work directly with a file
db_name = os.path.join(target_path, 'shapes_lib.db')
@classmethod
def connect(cls):
if cls.backend == 'sqlite3':
DB.conn = sql.connect(DB.db_name)
DB.c = DB.conn.cursor()
elif cls.backend == 'postgres': # or something else
DB.conn = sql.connect(host='localhost',
user='root', port='8889')
DB.c = DB.conn.cursor()
return DB
__enter__ = connect # allows optional syntax to trigger
@classmethod
def disconnect(cls):
DB.conn.close()
def __exit__(cls, *oops): # in case of context manager syntax
cls.disconnect()
if oops[0]:
return False
return True
with DB() as db:
db.c.execute("SELECT poly_long, poly_color, poly_volume from Polys") # query
pprint(db.c.fetchall()) # print results
with DB() as db:
db.c.execute("SELECT vertex_label, coord_a, coord_b, coord_c, coord_d FROM Coords ORDER BY vertex_label") # query
pprint(db.c.fetchall()) # print results
with DB() as db:
db.c.execute("SELECT vertex_label, coord_x, coord_y, coord_z FROM Coords ORDER BY vertex_label") # query
pprint(db.c.fetchall()) # print results
DB.connect()
DB.c.execute("SELECT * FROM Coords ORDER BY vertex_label") # query
for rec in DB.c.fetchall():
print(rec) # print results
DB.disconnect()
query3 = (
"""
SELECT f.poly_nick, f.poly_face_id, f.vertex_labels, p.poly_long
FROM Polys p
INNER JOIN Faces f ON f.poly_nick = p.poly_nick
WHERE p.poly_nick = "RD"
"""
)
with DB() as db:
db.c.execute(query3) # query
pprint(db.c.fetchall()) # print results
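# (added sketch) the same join written as a parameterized query, which is the safer pattern
# whenever the filter value might come from user input
query3p = (
"SELECT f.poly_nick, f.poly_face_id, f.vertex_labels, p.poly_long "
"FROM Polys p INNER JOIN Faces f ON f.poly_nick = p.poly_nick "
"WHERE p.poly_nick = ?"
)
with DB() as db:
    db.c.execute(query3p, ("RD",)) # query
    pprint(db.c.fetchall()) # print results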
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The DB class contains information for connecting to a SQLite database, which may be accessed directly, as a text file, no need for a special server. SQLite is free of charge.
Step2: What we see above is connecting to the database, running a one-liner, a SELECT query, and pretty-printing the results. Then disconnecting.
Step3: Why are these coordinates integers and why are there four of them per labeled vertex? Is this another "legal in Oregon" thing?
Step4: As shown below, we're not obligated to use the DB class as a context manager, in which case we may call the class's methods directly. Context manager syntax is a little cleaner and more likely to disconnect in case of issues.
Step6: The database in question consists of three tables
|
13,252
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import nibabel
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
import mne
from mne.transforms import apply_trans
from mne.io.constants import FIFF
data_path = mne.datasets.sample.data_path()
subjects_dir = os.path.join(data_path, 'subjects')
subject = 'sample'
t1_fname = os.path.join(subjects_dir, subject, 'mri', 'T1.mgz')
t1 = nibabel.load(t1_fname)
t1.orthoview()
data = np.asarray(t1.dataobj)
print(data.shape)
print(t1.affine)
vox = np.array([122, 119, 102])
xyz_ras = apply_trans(t1.affine, vox)
print('Our voxel has real-world coordinates {}, {}, {} (mm)'
.format(*np.round(xyz_ras, 3)))
ras_coords_mm = np.array([1, -17, -18])
inv_affine = np.linalg.inv(t1.affine)
i_, j_, k_ = np.round(apply_trans(inv_affine, ras_coords_mm)).astype(int)
print('Our real-world coordinates correspond to voxel ({}, {}, {})'
.format(i_, j_, k_))
def imshow_mri(data, img, vox, xyz, suptitle):
"""Show an MRI slice with a voxel annotated."""
i, j, k = vox
fig, ax = plt.subplots(1, figsize=(6, 6))
codes = nibabel.orientations.aff2axcodes(img.affine)
# Figure out the title based on the code of this axis
ori_slice = dict(P='Coronal', A='Coronal',
I='Axial', S='Axial',
L='Sagittal', R='Sagittal')
ori_names = dict(P='posterior', A='anterior',
I='inferior', S='superior',
L='left', R='right')
title = ori_slice[codes[0]]
ax.imshow(data[i], vmin=10, vmax=120, cmap='gray', origin='lower')
ax.axvline(k, color='y')
ax.axhline(j, color='y')
for kind, coords in xyz.items():
annotation = ('{}: {}, {}, {} mm'
.format(kind, *np.round(coords).astype(int)))
text = ax.text(k, j, annotation, va='baseline', ha='right',
color=(1, 1, 0.7))
text.set_path_effects([
path_effects.Stroke(linewidth=2, foreground='black'),
path_effects.Normal()])
# reorient view so that RAS is always rightward and upward
x_order = -1 if codes[2] in 'LIP' else 1
y_order = -1 if codes[1] in 'LIP' else 1
ax.set(xlim=[0, data.shape[2] - 1][::x_order],
ylim=[0, data.shape[1] - 1][::y_order],
xlabel=f'k ({ori_names[codes[2]]}+)',
ylabel=f'j ({ori_names[codes[1]]}+)',
title=f'{title} view: i={i} ({ori_names[codes[0]]}+)')
fig.suptitle(suptitle)
fig.subplots_adjust(0.1, 0.1, 0.95, 0.85)
return fig
imshow_mri(data, t1, vox, {'Scanner RAS': xyz_ras}, 'MRI slice')
Torig = t1.header.get_vox2ras_tkr()
print(t1.affine)
print(Torig)
xyz_mri = apply_trans(Torig, vox)
imshow_mri(data, t1, vox, dict(MRI=xyz_mri), 'MRI slice')
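# (added note) chaining the two transforms maps FreeSurfer surface RAS ("MRI") coordinates back
# to scanner RAS, i.e. applying affine @ inv(Torig) to the MRI coordinates:
mri_to_scanner = np.dot(t1.affine, np.linalg.inv(Torig))
print(apply_trans(mri_to_scanner, xyz_mri))  # should match xyz_ras computed earlier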
fiducials = mne.coreg.get_mni_fiducials(subject, subjects_dir=subjects_dir)
nasion_mri = [d for d in fiducials if d['ident'] == FIFF.FIFFV_POINT_NASION][0]
print(nasion_mri) # note it's in Freesurfer MRI coords
nasion_mri = nasion_mri['r'] * 1000 # meters → millimeters
nasion_vox = np.round(
apply_trans(np.linalg.inv(Torig), nasion_mri)).astype(int)
imshow_mri(data, t1, nasion_vox, dict(MRI=nasion_mri),
'Nasion estimated from MRI transform')
info = mne.io.read_info(
os.path.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif'))
nasion_head = [d for d in info['dig'] if
d['kind'] == FIFF.FIFFV_POINT_CARDINAL and
d['ident'] == FIFF.FIFFV_POINT_NASION][0]
print(nasion_head) # note it's in "head" coordinates
trans = mne.read_trans(
os.path.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-trans.fif'))
# first we transform from head to MRI, and *then* convert to millimeters
nasion_dig_mri = apply_trans(trans, nasion_head['r']) * 1000
# ...then we can use Torig to convert MRI to voxels:
nasion_dig_vox = np.round(
apply_trans(np.linalg.inv(Torig), nasion_dig_mri)).astype(int)
imshow_mri(data, t1, nasion_dig_vox, dict(MRI=nasion_dig_mri),
'Nasion transformed from digitization')
fname = os.path.join(subjects_dir, subject, 'surf', 'rh.white')
rr_mm, tris = mne.read_surface(fname)
print(f'rr_mm.shape == {rr_mm.shape}')
print(f'tris.shape == {tris.shape}')
print(f'rr_mm.max() = {rr_mm.max()}') # just to show that we are in mm
renderer = mne.viz.backends.renderer.create_3d_figure(
size=(600, 600), bgcolor='w', scene=False)
gray = (0.5, 0.5, 0.5)
renderer.mesh(*rr_mm.T, triangles=tris, color=gray)
view_kwargs = dict(elevation=90, azimuth=0)
mne.viz.set_3d_view(
figure=renderer.figure, distance=350, focalpoint=(0., 0., 40.),
**view_kwargs)
renderer.show()
rr_vox = apply_trans(np.linalg.inv(Torig), rr_mm)
fig = imshow_mri(data, t1, vox, {'Scanner RAS': xyz_ras}, 'MRI slice')
# Based on how imshow_mri works, the "X" here is the last dim of the MRI vol,
# the "Y" is the middle dim, and the "Z" is the first dim, so now that our
# points are in the correct coordinate frame, we need to ask matplotlib to
# do a tricontour slice like:
fig.axes[0].tricontour(rr_vox[:, 2], rr_vox[:, 1], tris, rr_vox[:, 0],
levels=[vox[0]], colors='r', linewidths=1.0,
zorder=1)
renderer_kwargs = dict(bgcolor='w', smooth_shading=False)
renderer = mne.viz.backends.renderer.create_3d_figure(
size=(800, 400), scene=False, **renderer_kwargs)
curvs = [
(mne.surface.read_curvature(os.path.join(
subjects_dir, subj, 'surf', 'rh.curv'),
binary=False) > 0).astype(float)
for subj in ('sample', 'fsaverage') for _ in range(2)]
fnames = [os.path.join(subjects_dir, subj, 'surf', surf)
for subj in ('sample', 'fsaverage')
for surf in ('rh.white', 'rh.sphere')]
y_shifts = [-450, -150, 450, 150]
z_shifts = [-40, 0, -30, 0]
for name, y_shift, z_shift, curv in zip(fnames, y_shifts, z_shifts, curvs):
this_rr, this_tri = mne.read_surface(name)
this_rr += [0, y_shift, z_shift]
renderer.mesh(*this_rr.T, triangles=this_tri, color=None, scalars=curv,
colormap='copper_r', vmin=-0.2, vmax=1.2)
zero = [0., 0., 0.]
width = 50.
y = np.sort(y_shifts)
y = (y[1:] + y[:-1]) / 2. - width / 2.
renderer.quiver3d(zero, y, zero,
zero, [1] * 3, zero, 'k', width, 'arrow')
view_kwargs['focalpoint'] = (0., 0., 0.)
mne.viz.set_3d_view(figure=renderer.figure, distance=1000, **view_kwargs)
renderer.show()
cyan = '#66CCEE'
purple = '#AA3377'
renderer = mne.viz.backends.renderer.create_3d_figure(
size=(800, 800), scene=False, **renderer_kwargs)
fnames = [os.path.join(subjects_dir, subj, 'surf', 'rh.sphere')
for subj in ('sample', 'fsaverage')]
colors = [cyan, purple]
for name, color in zip(fnames, colors):
this_rr, this_tri = mne.read_surface(name)
renderer.mesh(*this_rr.T, triangles=this_tri, color=color,
representation='wireframe')
mne.viz.set_3d_view(figure=renderer.figure, distance=20, **view_kwargs)
renderer.show()
src = mne.read_source_spaces(os.path.join(subjects_dir, 'sample', 'bem',
'sample-oct-6-src.fif'))
print(src)
blue = '#4477AA'
renderer = mne.viz.backends.renderer.create_3d_figure(
size=(800, 800), scene=False, **renderer_kwargs)
rr_sph, _ = mne.read_surface(fnames[0])
for tris, color in [(src[1]['tris'], cyan), (src[1]['use_tris'], blue)]:
renderer.mesh(*rr_sph.T, triangles=tris, color=color,
representation='wireframe')
mne.viz.set_3d_view(figure=renderer.figure, distance=20, **view_kwargs)
renderer.show()
renderer = mne.viz.backends.renderer.create_3d_figure(
size=(800, 400), scene=False, **renderer_kwargs)
y_shifts = [-125, 125]
tris = [src[1]['tris'], src[1]['use_tris']]
for y_shift, tris in zip(y_shifts, tris):
this_rr = src[1]['rr'] * 1000. + [0, y_shift, -40]
renderer.mesh(*this_rr.T, triangles=tris, color=None, scalars=curvs[0],
colormap='copper_r', vmin=-0.2, vmax=1.2)
renderer.quiver3d([0], [-width / 2.], [0], [0], [1], [0], 'k', width, 'arrow')
mne.viz.set_3d_view(figure=renderer.figure, distance=400, **view_kwargs)
renderer.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: MRI coordinate frames
Step2: Notice that the axes in the
Step3: These data are voxel intensity values. Here they are unsigned integers in the
Step4: If you have a point (x, y, z) in scanner-native RAS space and you want
Step6: Let's write a short function to visualize where our voxel lies in an
Step7: Notice that the axis scales (i, j, and k) are still in voxels
Step8: Knowing these relationships and being mindful about transformations, we
Step9: When we print the nasion, it displays as a DigPoint and shows its
Step10: We can also take the digitization point from the MEG data, which is in the
Step11: .. sidebar
Step12: Using FreeSurfer's surface reconstructions
Step13: Let's actually plot it
Step14: We can also plot the mesh on top of an MRI slice. The mesh surfaces are
Step15: This is the method used by
Step16: Let's look a bit more closely at the spherical alignment by overlaying the
Step17: You can see that the fsaverage (purple) mesh is uniformly spaced, and the
Step18: We can also then look at how these two meshes compare by plotting the
|
13,253
|
<ASSISTANT_TASK:>
Python Code:
def fibonacci(n):
a, b = 0, 1
while n:
a, b = b, a + b
n -= 1
return a
for n in range(10):
print(fibonacci(n))
[fibonacci(n) for n in range(10)]
def gen_fibo(n):
a, b = 0, 1
while n:
yield a
a, b = b, a + b
n -= 1
g10 = gen_fibo(10)
g10
f10 = list(g10)
f10
for n in gen_fibo(n):
print(n)
f10 = list(gen_fibo(10))
ratios = [a/b for a, b in zip(f10[2:], f10[1:-1])]
ratios
φ = (1 + 5**0.5) / 2
φ
%matplotlib inline
import matplotlib.pyplot as plt
plt.figure()
plt.plot(ratios)
plt.axhline(φ, color='r')
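# A small sanity check that is not in the original notebook: Binet's closed form
# F(n) = (φ**n - (1 - φ)**n) / sqrt(5) should reproduce the iterative values above.
binet = lambda n: round((φ**n - (1 - φ)**n) / 5**0.5)
print([binet(n) for n in range(10)])
print(f10)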
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The generator
Step2: Fibonacci numbers and the golden ratio
|
13,254
|
<ASSISTANT_TASK:>
Python Code:
!head -n12 $LISA_HOME/logging.conf
!head -n30 $LISA_HOME/logging.conf | tail -n5
import logging
from conf import LisaLogging
LisaLogging.setup(level=logging.INFO)
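# Not in the original notebook: since every LISA module logs under its own name, a single
# module can be made more (or less) verbose with plain Python logging. 'TestEnv' below is
# only an example name - use whichever module names appear in logging.conf.
logging.getLogger('TestEnv').setLevel(logging.DEBUG)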
from env import TestEnv
te = TestEnv({
'platform' : 'linux',
'board' : 'juno',
'host' : '10.1.210.45',
'username' : 'root'
})
target = te.target
tests_conf = {
"confs" : [
{
"tag" : "base",
"flags" : "ftrace",
"sched_features" : "NO_ENERGY_AWARE",
"cpufreq" : {
"governor" : "performance",
},
"files" : {
'/proc/sys/kernel/sched_is_big_little' : '0',
'!/proc/sys/kernel/sched_migration_cost_ns' : '500000'
},
}
]
}
from trace import Trace
import json
with open('/home/patbel01/Code/lisa/results/LisaInANutshell_Backup/platform.json', 'r') as fh:
platform = json.load(fh)
trace = Trace('/home/patbel01/Code/lisa/results/LisaInANutshell_Backup/trace.dat',
              ['sched_switch'], platform)
logging.info("%d tasks loaded from trace", len(trace.getTasks()))
logging.info("The rt-app task in this trace has these PIDs:")
logging.info(" %s", trace.getTasks()['rt-app'])
!cat $LISA_HOME/libs/utils/platforms/pixel.json
from env import TestEnv
te = TestEnv({
'platform' : 'android',
'board' : 'pixel',
'ANDROID_HOME' : '/home/patbel01/Code/lisa/tools/android-sdk-linux/'
}, force_new=True)
target = te.target
!tree -L 1 ~/Code/lisa/ipynb
!tree -L 1 ~/Code/lisa/ipynb/examples
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Each module has a unique name which can be used to assign a priority level for messages generated by that module.
Step2: The default logging level for a notebook can also be easily configured using these few lines
Step3: Removed Juno/Juno2 distinction
Step4: Executor Module
Step5: can be used to run a test where the platform is configured to
Step6: Android Support
Step7: Added UiBench workload
Step8: This folder is configured to be ignored by git, thus it's the best place to place your work-in-progress notebooks.
|
13,255
|
<ASSISTANT_TASK:>
Python Code:
# Create a SystemML MLContext object
from systemml import MLContext, dml
ml = MLContext(sc)
%%sh
mkdir -p data/mnist/
cd data/mnist/
curl -O http://pjreddie.com/media/files/mnist_train.csv
curl -O http://pjreddie.com/media/files/mnist_test.csv
script_string = """
source("mnist_lenet.dml") as mnist_lenet
# Read training data
data = read($data, format="csv")
n = nrow(data)
# Extract images and labels
images = data[,2:ncol(data)]
labels = data[,1]
# Scale images to [-1,1], and one-hot encode the labels
images = (images / 255.0) * 2 - 1
labels = table(seq(1, n), labels+1, n, 10)
# Split into training (55,000 examples) and validation (5,000 examples)
X = images[5001:nrow(images),]
X_val = images[1:5000,]
y = labels[5001:nrow(images),]
y_val = labels[1:5000,]
# Train
[W1, b1, W2, b2, W3, b3, W4, b4] = mnist_lenet::train(X, y, X_val, y_val, C, Hin, Win)
"""
script = (dml(script_string).input("$data", "data/mnist/mnist_train.csv")
.input(C=1, Hin=28, Win=28)
.output("W1", "b1", "W2", "b2", "W3", "b3", "W4", "b4"))
W1, b1, W2, b2, W3, b3, W4, b4 = (ml.execute(script)
.get("W1", "b1", "W2", "b2", "W3", "b3", "W4", "b4"))
script_string = """
source("mnist_lenet.dml") as mnist_lenet
# Read test data
data = read($data, format="csv")
n = nrow(data)
# Extract images and labels
X_test = data[,2:ncol(data)]
y_test = data[,1]
# Scale images to [-1,1], and one-hot encode the labels
X_test = (X_test / 255.0) * 2 - 1
y_test = table(seq(1, n), y_test+1, n, 10)
# Eval on test set
probs = mnist_lenet::predict(X_test, C, Hin, Win, W1, b1, W2, b2, W3, b3, W4, b4)
[loss, accuracy] = mnist_lenet::eval(probs, y_test)
print("Test Accuracy: " + accuracy)
script = dml(script_string).input(**{"$data": "data/mnist/mnist_train.csv",
"C": 1, "Hin": 28, "Win": 28,
"W1": W1, "b1": b1,
"W2": W2, "b2": b2,
"W3": W3, "b3": b3,
"W4": W4, "b4": b4})
ml.execute(script)
W1_df = W1.toDF()
b1_df = b1.toDF()
W2_df = W2.toDF()
b2_df = b2.toDF()
W3_df = W3.toDF()
b3_df = b3.toDF()
W4_df = W4.toDF()
b4_df = b4.toDF()
W1_df, b1_df, W2_df, b2_df, W3_df, b3_df, W4_df, b4_df
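# Not part of the original notebook: one way to persist the extracted model for later use is
# to write each Spark DataFrame out, e.g. as Parquet (the output path is just a placeholder).
for name, frame in [("W1", W1_df), ("b1", b1_df), ("W2", W2_df), ("b2", b2_df),
                    ("W3", W3_df), ("b3", b3_df), ("W4", W4_df), ("b4", b4_df)]:
    frame.write.mode("overwrite").parquet("mnist_lenet_model/{}.parquet".format(name))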
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download Data - MNIST
Step3: SystemML "LeNet" Neural Network
Step5: 2. Compute Test Accuracy
Step6: 3. Extract Model Into Spark DataFrames For Future Use
|
13,256
|
<ASSISTANT_TASK:>
Python Code:
import pandas
import numpy
from folding_group import FoldingGroupClassifier
from rep.data import LabeledDataStorage
from rep.report import ClassificationReport
from rep.report.metrics import RocAuc
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_curve, roc_auc_score
from utils import get_N_B_events, get_events_number, get_events_statistics
import root_numpy
# the cells below use pylab-style globals (sqrt, hist, figure, subplot, title, xlabel, ...);
# the original notebook presumably ran this magic, so it is restored here
%pylab inline
data_nan = pandas.DataFrame(root_numpy.root2array('datasets/tracks.root', 'tracks'))
data_nan.head()
event_id_column = 'event_id'
event_id = data_nan.run.apply(str) + '_' + data_nan.event.apply(str)
data_nan['group_column'] = numpy.unique(event_id, return_inverse=True)[1]
data_nan[event_id_column] = event_id
get_events_statistics(data_nan)
get_N_B_events()
data = data_nan.dropna()
len(data_nan), len(data), get_events_statistics(data)
# add difference between max pt in the event and pt for each track
def add_diff_pt(data):
max_pt = group_max(data[event_id_column].values.astype(str), data.partPt.values)
data.loc[:, 'diff_pt'] = max_pt - data['partPt'].values
# group_max computes the max over tracks in the same event for some data
def group_max(groups, data):
# computing unique integer id for each group
assert len(groups) == len(data)
_, event_id = numpy.unique(groups, return_inverse=True)
max_over_event = numpy.zeros(max(event_id) + 1) - numpy.inf
numpy.maximum.at(max_over_event, event_id, data)
return max_over_event[event_id]
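# A tiny worked example of group_max (not in the original notebook): for groups
# ['a', 'a', 'b'] with data [1, 5, 2], every row receives its own group's maximum,
# so the result is [5, 5, 2].
print(group_max(numpy.array(['a', 'a', 'b']), numpy.array([1., 5., 2.])))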
# add diff pt
add_diff_pt(data)
# add cos(diff_phi)
data.loc[:, 'cos_diff_phi'] = numpy.cos(data.diff_phi.values)
from itertools import combinations
PIDs = {'k': data.PIDNNk.values,
'e': data.PIDNNe.values,
'mu': data.PIDNNm.values,
}
for (pid_name1, pid_values1), (pid_name2, pid_values2) in combinations(PIDs.items(), 2):
data.loc[:, 'max_PID_{}_{}'.format(pid_name1, pid_name2)] = numpy.maximum(pid_values1, pid_values2)
data.loc[:, 'sum_PID_{}_{}'.format(pid_name1, pid_name2)] = pid_values1 + pid_values2
data.loc[:, 'label'] = (data.signB.values * data.signTrack.values > 0) * 1
','.join(data.columns)
initial_cut = '(ghostProb < 0.4)'
data = data.query(initial_cut)
os_selection = (data.IPs.values > 3) * ((abs(data.diff_eta.values) > 0.6) | (abs(data.diff_phi.values) > 0.825))
data = data[os_selection]
get_events_statistics(data)
threshold_kaon = 0.
threshold_muon = 0.
threshold_electron = 0.
threshold_pion = 0.5
threshold_proton = 0.5
cut_pid = " ((PIDNNk > {trk}) | (PIDNNm > {trm}) | (PIDNNe > {tre})) & (PIDNNpi < {trpi}) & (PIDNNp < {trp}) "
cut_pid = cut_pid.format(trk=threshold_kaon, trm=threshold_muon, tre=threshold_electron, trpi=threshold_pion,
trp=threshold_proton)
data = data.query(cut_pid)
get_events_statistics(data)
N_B_passed = float(get_events_number(data))
tagging_efficiency = N_B_passed / get_N_B_events()
tagging_efficiency_delta = sqrt(N_B_passed) / get_N_B_events()
tagging_efficiency, tagging_efficiency_delta
hist(data.diff_pt.values, bins=100)
pass
_, take_indices = numpy.unique(data[event_id_column], return_index=True)
figure(figsize=[15, 5])
subplot(1, 2, 1)
hist(data.Bmass.values[take_indices], bins=100)
title('B mass hist')
xlabel('mass')
subplot(1, 2, 2)
hist(data.N_sig_sw.values[take_indices], bins=100, normed=True)
title('sWeights hist')
xlabel('signal sWeights')
plt.savefig('img/Bmass_OS.png' , format='png')
sweight_threshold = 1.
data_sw_passed = data[data.N_sig_sw > sweight_threshold]
data_sw_not_passed = data[data.N_sig_sw <= sweight_threshold]
get_events_statistics(data_sw_passed)
_, take_indices = numpy.unique(data_sw_passed[event_id_column], return_index=True)
figure(figsize=[15, 5])
subplot(1, 2, 1)
hist(data_sw_passed.Bmass.values[take_indices], bins=100)
title('B mass hist for sWeight > 1 selection')
xlabel('mass')
subplot(1, 2, 2)
hist(data_sw_passed.N_sig_sw.values[take_indices], bins=100, normed=True)
title('sWeights hist for sWeight > 1 selection')
xlabel('signal sWeights')
plt.savefig('img/Bmass_selected_OS.png' , format='png')
hist(data_sw_passed.diff_pt.values, bins=100)
pass
features = list(set(data.columns) - {'index', 'run', 'event', 'i', 'signB', 'signTrack', 'N_sig_sw', 'Bmass', 'mult',
'PIDNNp', 'PIDNNpi', 'label', 'thetaMin', 'Dist_phi', event_id_column,
'mu_cut', 'e_cut', 'K_cut', 'ID', 'diff_phi', 'group_column'})
features
figure(figsize=[15, 16])
bins = 60
step = 3
for i, (feature1, feature2) in enumerate(combinations(['PIDNNk', 'PIDNNm', 'PIDNNe', 'PIDNNp', 'PIDNNpi'], 2)):
subplot(4, 3, i + 1)
Z, (x, y) = numpy.histogramdd(data_sw_passed[[feature1, feature2]].values, bins=bins, range=([0, 1], [0, 1]))
pcolor(numpy.log(Z).T, vmin=0)
xlabel(feature1)
ylabel(feature2)
xticks(numpy.arange(bins, step), x[::step]), yticks(numpy.arange(bins, step), y[::step])
plt.savefig('img/PID_selected_OS.png' , format='png')
hist(data_sw_passed.diff_pt.values, bins=60, normed=True)
pass
_, n_tracks = numpy.unique(data_sw_passed[event_id_column], return_counts=True)
hist(n_tracks, bins=max(n_tracks), range=(1, max(n_tracks)))
title('Number of tracks')
plt.savefig('img/tracks_number_OS.png' , format='png')
figure(figsize=[15, 4])
for i, column in enumerate(['PIDNNm', 'PIDNNe', 'PIDNNk']):
subplot(1, 3, i + 1)
hist(data_sw_passed[column].values, bins=60, range=(0, 1), label=column)
legend()
from hep_ml.decisiontrain import DecisionTrainClassifier
from hep_ml.losses import LogLossFunction
from rep.estimators import SklearnClassifier
data_sw_passed_lds = LabeledDataStorage(data_sw_passed, data_sw_passed.label, data_sw_passed.N_sig_sw.values)
tt_base = DecisionTrainClassifier(learning_rate=0.02, n_estimators=3000, depth=6,
max_features=15, loss=LogLossFunction(regularization=100))
tt_folding = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=11,
train_features=features, group_feature='group_column',
parallel_profile = 'threads-2')
%time tt_folding.fit_lds(data_sw_passed_lds)
pass
import cPickle
with open('models/dt_OS.pkl', 'w') as f:
cPickle.dump(tt_folding, f)
comparison_report = ClassificationReport({'tt': tt_folding}, data_sw_passed_lds)
comparison_report.compute_metric(RocAuc())
comparison_report.roc()
lc = comparison_report.learning_curve(RocAuc(), steps=1)
lc
for est in tt_folding.estimators:
est.estimators = est.estimators[:1000]
comparison_report.feature_importance()
from utils import get_result_with_bootstrap_for_given_part
models = []
models.append(get_result_with_bootstrap_for_given_part(tagging_efficiency, tagging_efficiency_delta, tt_folding,
[data_sw_passed, data_sw_not_passed], 'tt-iso', logistic=False))
models.append(get_result_with_bootstrap_for_given_part(tagging_efficiency, tagging_efficiency_delta, tt_folding,
[data_sw_passed, data_sw_not_passed], 'tt-log', logistic=True))
pandas.set_option('display.precision', 8)
result = pandas.concat(models)
result.index = result.name
result.drop('name', axis=1)
from utils import prepare_B_data_for_given_part
Bdata_prepared = prepare_B_data_for_given_part(tt_folding, [data_sw_passed, data_sw_not_passed], logistic=True)
Bdata_prepared.to_csv('models/Bdata_tracks_OS.csv', header=True, index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading initial data
Step2: Remove rows with NAN from data
Step3: Add diff_pt and cos(diff_phi)
Step4: Add max, sum among PIDs
Step5: define label = signB * signTrack
Step6: Apply ghost prob cut
Step7: Leave not muons, kaons, electrons; Filter pions, protons
Step8: Calculating tagging efficiency ($\epsilon_{tag}$)
Step9: Choose most probable B-events
Step10: Define B-like events for training
Step11: Main idea
Step12: PID pairs scatters
Step13: pt
Step14: count of tracks
Step15: PIDs histograms
Step16: Train to distinguish same sign vs opposite sign
Step17: DT
Step18: Calibration
Step19: Comparison table of different models
Step20: Implementing best tracking
|
13,257
|
<ASSISTANT_TASK:>
Python Code:
prime_ministers = ['David Ben-Gurion', 'Moshe Sharett', 'David Ben-Gurion', 'Levi Eshkol', 'Yigal Alon', 'Golda Meir']
print(prime_ministers)
type(prime_ministers)
numbers = [1, 2, 3, 4, 5, 6, 7]
wtf = ['The cake is a', False, 42]
empty_list = []
# Index 0 1 2 3 4 5
vinyls = ['Ecliptica', 'GoT Season 6', 'Lone Digger', 'Everything goes numb', 'Awesome Mix Vol. 1', 'Ultimate Sinatra']
print(vinyls[4])
# 0 1 2 3 4 5
vinyls = ['Ecliptica', 'GoT Season 6', 'Lone Digger', 'Everything goes numb', 'Awesome Mix Vol. 1', 'Ultimate Sinatra']
# -6 -5 -4 -3 -2 -1
print(vinyls[-2])
type(vinyls[0])
print(vinyls[0] + ', By Sonata Arctica')
# How many vinyl records do I have?
len(vinyls)
print(vinyls)
vinyls[1] = 'GoT Season 7'
print(vinyls)
[1, 2, 3] + [4, 5, 6]
['a', 'b', 'c'] + ['easy', 'as'] + [1, 2, 3]
['wake up', 'go to school', 'sleep'] * 365
['Is', 'someone', 'getting'] + ['the', 'best,'] * 4 + ['of', 'you?']
[1, 2, 3] + 5
prime_ministers = ['David Ben-Gurion', 'Moshe Sharett', 'David Ben-Gurion', 'Levi Eshkol', 'Yigal Alon', 'Golda Meir']
print(prime_ministers)
prime_ministers + ['Yitzhak Rabin']
print(prime_ministers)
print(prime_ministers)
prime_ministers = prime_ministers + ['Yitzhak Rabin']
print(prime_ministers)
pupils_in_sunday = ['Moshe', 'Dukasit', 'Michelangelo']
pupils_in_monday = ['Moshe', 'Dukasit', 'Master Splinter']
pupils_in_tuesday = ['Moshe', 'Dukasit', 'Michelangelo']
pupils_in_wednesday = ['Moshe', 'Dukasit', 'Michelangelo', 'Master Splinter']
print("Is it Monday? " + str(pupils_in_sunday == pupils_in_monday))
print("Is it Tuesday? " + str(pupils_in_sunday == pupils_in_tuesday))
print("Is it Wednesday? " + str(pupils_in_sunday == pupils_in_wednesday))
print('Moshe' in pupils_in_tuesday)
# This is the same as:
print('Moshe' in ['Moshe', 'Dukasit', 'Michelangelo'])
'Master Splinter' not in pupils_in_tuesday
python_new_version = [3, 7, 2]
python_old_version = [2, 7, 16]
print(python_new_version > python_old_version)
pupils_in_sunday = ['Moshe', 'Dukasit', 'Michelangelo']
pupils_in_monday = ['Moshe', 'Dukasit', 'Splinter']
pupils_in_tuesday = ['Moshe', 'Dukasit', 'Michelangelo']
pupils_in_wednesday = ['Moshe', 'Dukasit', 'Michelangelo', 'Splinter']
pupils = [pupils_in_sunday, pupils_in_monday, pupils_in_tuesday, pupils_in_wednesday]
print(pupils)
pupils = [['Moshe', 'Dukasit', 'Michelangelo'], ['Moshe', 'Dukasit', 'Splinter'], ['Moshe', 'Dukasit', 'Michelangelo'], ['Moshe', 'Dukasit', 'Michelangelo', 'Splinter']]
pupils[0]
pupils_in_sunday = pupils[0]
print(pupils_in_sunday[-1])
# או פשוט:
print(pupils[0][-1])
print("pupils = " + str(pupils))
print("-" * 50)
print("1. 'Moshe' in pupils == " + str('Moshe' in pupils))
print("2. 'Moshe' in pupils[0] == " + str('Moshe' in pupils[0]))
print("3. ['Moshe', 'Splinter'] in pupils == " + str(['Moshe', 'Splinter'] in pupils))
print("4. ['Moshe', 'Splinter'] in pupils[-1] == " + str(['Moshe', 'Splinter'] in pupils[-1]))
print("5. ['Moshe', 'Dukasit', 'Splinter'] in pupils == " + str(['Moshe', 'Dukasit', 'Splinter'] in pupils))
print("6. ['Moshe', 'Dukasit', 'Splinter'] in pupils[0] == " + str(['Moshe', 'Dukasit', 'Splinter'] in pupils[0]))
judges = ['Esther Hayut', 'Miriam Naor', 'Asher Grunis', 'Dorit Beinisch', 'Aharon Barak']
ice_cream_flavours = ['chocolate', 'vanilla', 'pistachio', 'banana']
rabanim = ['Rashi', 'Maimonides', 'Nachmanides', 'Rabbeinu Tam']
'Rashi' in rabanim
'RASHI' in rabanim
['Rashi'] in rabanim
['Rashi', 'Nachmanides'] in rabanim
'Bruria' in rabanim
rabanim + ['Gershom ben Judah']
'Gershom ben Judah' in rabanim
'3' in [1, 2, 3]
(1 + 5 - 3) in [1, 2, 3]
[1, 5, 3] > [1, 2, 3]
rabanim[0] in [rabanim[0] + rabanim[1]]
rabanim[0] in [rabanim[0]] + [rabanim[1]]
rabanim[-1] == rabanim[0] or rabanim[-1] == rabanim[1] or rabanim[-1] == rabanim[2] or rabanim[-1] == rabanim[3]
rabanim[-1] == rabanim[0] or rabanim[-1] == rabanim[1] or rabanim[-1] == rabanim[2] and rabanim[-1] != rabanim[3]
rabanim[-1] == rabanim[0] or rabanim[-1] == rabanim[1] or rabanim[-1] == rabanim[2] and rabanim[-1] == rabanim[3]
1 in [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
[1, 2, 3] in [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
[[1, 2, 3], [4, 5, 6], [7, 8, 9]][0][2]
[[1, 2, 3], [4, 5, 6], [7, 8, 9]][0][3]
[[1, 2, 3], [4, 5, 6], [7, 8, 9]][0][-1] * 5
[[[1, 2, 3], [4, 5, 6], [7, 8, 9]][0][-1]] * 5
[[1, 2, 3], [4, 5, 6], [7, 8, 9]][0][-1]
[[1, 2, 3], [4, 5, 6], [7, 8, 9]][0][-1] == [[7, 8, 9], [4, 5, 6], [1, 2, 3]][2][2]
[[1, 2, 3]] in [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
[[1, 2, 3], [4, 5, 6]] in [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
[[1, 2, 3], [4, 5, 6]] in [[[1, 2, 3], [4, 5, 6]], [7, 8, 9]]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <p style="text-align
Step2: <p style="text-align
Step3: <p style="text-align
Step4: <p style="text-align
Step5: <p style="text-align
Step6: <p style="text-align
Step7: <div class="align-center" style="display
Step8: <p style="text-align
Step9: <p style="text-align
Step10: <p style="text-align
Step11: <div class="align-center" style="display
Step12: <p style="text-align
Step13: <p style="text-align
Step14: <p style="text-align
Step15: <p style="text-align
Step16: <p style="text-align
Step17: <p style="text-align
Step18: <p style="text-align
Step19: <p style="text-align
Step20: <p style="text-align
Step21: <p style="text-align
Step22: <p style="text-align
Step23: <p style="text-align
Step24: <table style="font-size
Step25: <p style="text-align
Step26: <p style="text-align
Step27: <p style="text-align
Step28: <ol style="text-align
Step29: <p style="text-align
Step30: <p style="align
|
13,258
|
<ASSISTANT_TASK:>
Python Code:
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
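# Not part of the original snippet: a quick usage check. Calling the tokenizer on a string
# returns input_ids (and an attention_mask), and decode() maps the ids back to text.
encoded = tokenizer("Hello world")
print(encoded["input_ids"])
print(tokenizer.decode(encoded["input_ids"]))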
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
13,259
|
<ASSISTANT_TASK:>
Python Code:
###ignore this block of code - it is required only to show the map in iPython - you won't need it!
from IPython.core.display import display, HTML
display(HTML('<iframe width="800" height="600" frameborder="1" scrolling ="no" src="./qgis2threejs/ACT_elevs_test_1.html"></iframe>'))
display(HTML('<iframe width="800" height="600" frameborder="1" scrolling ="no" src="./qgis2threejs/act_elevs_plus_greenveg.html"></iframe>'))
display(HTML('<iframe width="800" height="600" frameborder="1" scrolling ="no" src="./qgis2threejs/act_block_hilliness_proxy.html"></iframe>'))
display(HTML('<iframe width="800" height="600" frameborder="1" scrolling ="no" src="./qgis2threejs/veg_mean_colours.html"></iframe>'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: and there - your first 3D interactive map, made with no coding and using web data services!
Step2: Now we have an elevation map coloured by green vegetation! But that's only a pretty picture.
Step3: Now, let's add some complexity
|
13,260
|
<ASSISTANT_TASK:>
Python Code:
import kfp
import kfp.gcp as gcp
import kfp.dsl as dsl
import kfp.compiler as compiler
import kfp.components as comp
import datetime
import kubernetes as k8s
# Required Parameters
PROJECT_ID='<ADD GCP PROJECT HERE>'
GCS_BUCKET='gs://<ADD STORAGE LOCATION HERE>'
# Optional Parameters, but required for running outside Kubeflow cluster
# The host for 'AI Platform Pipelines' ends with 'pipelines.googleusercontent.com'
# The host for pipeline endpoint of 'full Kubeflow deployment' ends with '/pipeline'
# Examples are:
# https://7c021d0340d296aa-dot-us-central2.pipelines.googleusercontent.com
# https://kubeflow.endpoints.kubeflow-pipeline.cloud.goog/pipeline
HOST = '<ADD HOST NAME TO TALK TO KUBEFLOW PIPELINE HERE>'
# For 'full Kubeflow deployment' on GCP, the endpoint is usually protected through IAP, therefore the following
# will be needed to access the endpoint.
CLIENT_ID = '<ADD OAuth CLIENT ID USED BY IAP HERE>'
OTHER_CLIENT_ID = '<ADD OAuth CLIENT ID USED TO OBTAIN AUTH CODES HERE>'
OTHER_CLIENT_SECRET = '<ADD OAuth CLIENT SECRET USED TO OBTAIN AUTH CODES HERE>'
# This is to ensure the proper access token is present to reach the end point for 'AI Platform Pipelines'
# If you are not working with 'AI Platform Pipelines', this step is not necessary
! gcloud auth print-access-token
# Create kfp client
in_cluster = True
try:
k8s.config.load_incluster_config()
except:
in_cluster = False
pass
if in_cluster:
client = kfp.Client()
else:
if HOST.endswith('googleusercontent.com'):
CLIENT_ID = None
OTHER_CLIENT_ID = None
OTHER_CLIENT_SECRET = None
client = kfp.Client(host=HOST,
client_id=CLIENT_ID,
other_client_id=OTHER_CLIENT_ID,
other_client_secret=OTHER_CLIENT_SECRET)
%%bash
# Create folders if they don't exist.
mkdir -p tmp/reuse_components_pipeline/mnist_training
# Create the Python file that lists GCS blobs.
cat > ./tmp/reuse_components_pipeline/mnist_training/app.py <<HERE
import argparse
from datetime import datetime
import tensorflow as tf
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path', type=str, required=True, help='Name of the model file.')
parser.add_argument(
'--bucket', type=str, required=True, help='GCS bucket name.')
args = parser.parse_args()
bucket=args.bucket
model_path=args.model_path
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
print(model.summary())
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
callbacks = [
tf.keras.callbacks.TensorBoard(log_dir=bucket + '/logs/' + datetime.now().date().__str__()),
# Interrupt training if val_loss stops improving for over 2 epochs
tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),
]
model.fit(x_train, y_train, batch_size=32, epochs=5, callbacks=callbacks,
validation_data=(x_test, y_test))
from tensorflow import gfile
gcs_path = bucket + "/" + model_path
# The export require the folder is new
if gfile.Exists(gcs_path):
gfile.DeleteRecursively(gcs_path)
tf.keras.experimental.export_saved_model(model, gcs_path)
with open('/output.txt', 'w') as f:
f.write(gcs_path)
HERE
%%bash
# Create Dockerfile.
# AI platform only support tensorflow 1.14
cat > ./tmp/reuse_components_pipeline/mnist_training/Dockerfile <<EOF
FROM tensorflow/tensorflow:1.14.0-py3
WORKDIR /app
COPY . /app
EOF
IMAGE_NAME="mnist_training_kf_pipeline"
TAG="latest" # "v_$(date +%Y%m%d_%H%M%S)"
GCR_IMAGE="gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}".format(
PROJECT_ID=PROJECT_ID,
IMAGE_NAME=IMAGE_NAME,
TAG=TAG
)
APP_FOLDER='./tmp/reuse_components_pipeline/mnist_training/'
# In the following, for the purpose of demonstration
# Cloud Build is choosen for 'AI Platform Pipelines'
# kaniko is choosen for 'full Kubeflow deployment'
if HOST.endswith('googleusercontent.com'):
# kaniko is not pre-installed with 'AI Platform Pipelines'
import subprocess
# ! gcloud builds submit --tag ${IMAGE_NAME} ${APP_FOLDER}
cmd = ['gcloud', 'builds', 'submit', '--tag', GCR_IMAGE, APP_FOLDER]
build_log = (subprocess.run(cmd, stdout=subprocess.PIPE).stdout[:-1].decode('utf-8'))
print(build_log)
else:
if kfp.__version__ <= '0.1.36':
        # kfp releases after 0.1.36 introduce a breaking change that makes the following code stop working
import subprocess
builder = kfp.containers._container_builder.ContainerBuilder(
gcs_staging=GCS_BUCKET + "/kfp_container_build_staging"
)
kfp.containers.build_image_from_working_dir(
image_name=GCR_IMAGE,
working_dir=APP_FOLDER,
builder=builder
)
else:
raise("Please build the docker image use either [Docker] or [Cloud Build]")
image_name = GCR_IMAGE
%%bash -s "{image_name}"
GCR_IMAGE="${1}"
echo ${GCR_IMAGE}
# Create Yaml
# the image uri should be changed according to the above docker image push output
cat > mnist_pipeline_component.yaml <<HERE
name: Mnist training
description: Train a mnist model and save to GCS
inputs:
- name: model_path
description: 'Path of the tf model.'
type: String
- name: bucket
description: 'GCS bucket name.'
type: String
outputs:
- name: gcs_model_path
description: 'Trained model path.'
type: GCSPath
implementation:
container:
image: ${GCR_IMAGE}
command: [
python, /app/app.py,
--model_path, {inputValue: model_path},
--bucket, {inputValue: bucket},
]
fileOutputs:
gcs_model_path: /output.txt
HERE
import os
mnist_train_op = kfp.components.load_component_from_file(os.path.join('./', 'mnist_pipeline_component.yaml'))
mnist_train_op.component_spec
mlengine_deploy_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.4.0/components/gcp/ml_engine/deploy/component.yaml')
def deploy(
project_id,
model_uri,
model_id,
runtime_version,
python_version):
return mlengine_deploy_op(
model_uri=model_uri,
project_id=project_id,
model_id=model_id,
runtime_version=runtime_version,
python_version=python_version,
replace_existing_version=True,
set_default=True)
def deployment_test(project_id: str, model_name: str, version: str) -> str:
model_name = model_name.split("/")[-1]
version = version.split("/")[-1]
import googleapiclient.discovery
def predict(project, model, data, version=None):
        """Run predictions on a list of instances.

        Args:
          project: (str), project where the Cloud ML Engine Model is deployed.
          model: (str), model name.
          data: ([[any]]), list of input instances, where each input instance is a
            list of attributes.
          version: str, version of the model to target.

        Returns:
          Mapping[str: any]: dictionary of prediction results defined by the model.
        """
service = googleapiclient.discovery.build('ml', 'v1')
name = 'projects/{}/models/{}'.format(project, model)
if version is not None:
name += '/versions/{}'.format(version)
response = service.projects().predict(
name=name, body={
'instances': data
}).execute()
if 'error' in response:
raise RuntimeError(response['error'])
return response['predictions']
import tensorflow as tf
import json
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
result = predict(
project=project_id,
model=model_name,
data=x_test[0:2].tolist(),
version=version)
print(result)
return json.dumps(result)
# # Test the function with already deployed version
# deployment_test(
# project_id=PROJECT_ID,
# model_name="mnist",
# version='ver_bb1ebd2a06ab7f321ad3db6b3b3d83e6' # previous deployed version for testing
# )
deployment_test_op = comp.func_to_container_op(
func=deployment_test,
base_image="tensorflow/tensorflow:1.15.0-py3",
packages_to_install=["google-api-python-client==1.7.8"])
# Define the pipeline
@dsl.pipeline(
name='Mnist pipeline',
description='A toy pipeline that performs mnist model training.'
)
def mnist_reuse_component_deploy_pipeline(
project_id: str = PROJECT_ID,
model_path: str = 'mnist_model',
bucket: str = GCS_BUCKET
):
train_task = mnist_train_op(
model_path=model_path,
bucket=bucket
).apply(gcp.use_gcp_secret('user-gcp-sa'))
deploy_task = deploy(
project_id=project_id,
model_uri=train_task.outputs['gcs_model_path'],
model_id="mnist",
runtime_version="1.14",
python_version="3.5"
).apply(gcp.use_gcp_secret('user-gcp-sa'))
deploy_test_task = deployment_test_op(
project_id=project_id,
model_name=deploy_task.outputs["model_name"],
version=deploy_task.outputs["version_name"],
).apply(gcp.use_gcp_secret('user-gcp-sa'))
return True
pipeline_func = mnist_reuse_component_deploy_pipeline
experiment_name = 'minist_kubeflow'
arguments = {"model_path":"mnist_model",
"bucket":GCS_BUCKET}
run_name = pipeline_func.__name__ + ' run'
# Submit pipeline directly from pipeline function
run_result = client.create_run_from_pipeline_func(pipeline_func,
experiment_name=experiment_name,
run_name=run_name,
arguments=arguments)
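# Not part of the original notebook: the same pipeline function can also be compiled into a
# portable package (handy for uploading through the Kubeflow Pipelines UI). The file name
# below is only an example.
compiler.Compiler().compile(pipeline_func, 'mnist_reuse_component_deploy_pipeline.zip')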
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create client
Step2: Build reusable components
Step3: Create a Docker container
Step4: Build docker image
Step5: If you want to use docker to build the image
Step6: Writing your component definition file
Step7: Define deployment operation on AI Platform
Step9: Kubeflow serving deployment component as an option. Note that, the deployed Endppoint URI is not availabe as output of this component.
Step10: Create your workflow as a Python function
Step11: Submit a pipeline run
|
13,261
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import networkx as nx

stations = pd.read_csv('datasets/divvy_2013/Divvy_Stations_2013.csv', parse_dates=['online date'], index_col='id')
stations
trips = pd.read_csv('datasets/divvy_2013/Divvy_Trips_2013.csv', parse_dates=['starttime', 'stoptime'], index_col=['trip_id'])
trips = trips.sort_index()  # DataFrame.sort() has been removed from pandas; sort_index() keeps the original intent
trips
G = nx.DiGraph()
for r, d in stations.iterrows(): # call the pandas DataFrame row-by-row iterator
G.add_node(r, attr_dict=d.to_dict())
# # Run the following code at your own risk :)
# for r, d in trips.iterrows():
# start = d['from_station_id']
# end = d['to_station_id']
# if (start, end) not in G.edges():
# G.add_edge(start, end, count=1)
# else:
# G.edge[start][end]['count'] += 1
for (start, stop), d in trips.groupby(['from_station_id', 'to_station_id']):
G.add_edge(start, stop, count=len(d))
G.edges(data=True)
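# The accompanying text asks how dense the graph is; this quick check is not in the
# original cells. networkx defines density as the fraction of possible edges actually present.
print(nx.density(G))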
centralities = nx.betweenness_centrality(G, weight='count')
sorted(centralities.items(), key=lambda x:x[1], reverse=True)
import matplotlib.pyplot as plt
%matplotlib inline
plt.bar(centralities.keys(), centralities.values())
decentrality = nx.degree_centrality(G)
plt.bar(decentrality.keys(), decentrality.values())
nx.write_gpickle(G, 'datasets/divvy_2013/divvy_graph.pkl')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: At this point, we have our stations and trips data loaded into memory.
Step2: Then, let's iterate over the stations DataFrame, and add in the node attributes.
Step3: In order to answer the question of "which stations are important", we need to specify things a bit more. Perhaps a measure such as betweenness centrality or degree centrality may be appropriate here.
Step4: First off, let's figure out how dense the graph is. The graph's density is the number of edges divided by the total number of possible edges.
Step5: Applying what we learned earlier on, let's use the betweenness centrality metric.
Step6: Applying what we learned earlier, let's use the "degree centrality" metric as well.
Step7: The code above should have demonstrated to you the basic logic behind storing graph data in a human-readable format. For the richest data format, you can store a node list with attributes, and an edge list (a.k.a. adjacency list) with attributes.
|
13,262
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from astropy.table import Table as tbl
import urllib.request
import urllib.parse
import subprocess
import matplotlib.pyplot as plt
from cesium import featurize
%matplotlib inline
import sqlite3
url = "http://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-query?"
values = {'catalog':'ptf_objects', 'spatial':'None', 'outfmt':'1', 'selcols':'ra,dec,oid', 'constraints':'(bestchisq>100)and(ngoodobs>500)'}
# note: the original command misspelled "constraints", which may be why the accompanying text
# says subprocess.call did not appear to work; the flag is corrected here
subprocess.call('curl -F catalog=ptf_objects -F spatial=None -F outfmt=1 -F selcols=ra,dec,oid -F constraints="(bestchisq>100)and(ngoodobs>500)" "http://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-query?" -o objects.tbl', shell = True)
%%timeit
data = urllib.parse.urlencode(values)
data = data.encode('utf-8')
req = urllib.request.Request(url, data)
resp = urllib.request.urlopen(req)
respdata = resp.read()
saveFile = open('objects.tbl', 'wb')
saveFile.write(respdata)
saveFile.close()
objects = tbl.read('/home/nick/Desktop/NUREU17/LSST/VariableStarClassification/scripts/ptf_query/objects.tbl', format = 'ipac')
conn = sqlite3.connect('/home/nick/Desktop/NUREU17/LSST/VariableStarClassification/features.db')
cur = conn.cursor()
def saveFeat (lc, tName, cur, conn): #pass in lightcurve table and cursor
feats_to_use = [
'amplitude',
'flux_percentile_ratio_mid20',
'flux_percentile_ratio_mid35',
'flux_percentile_ratio_mid50',
'flux_percentile_ratio_mid65',
'flux_percentile_ratio_mid80',
'max_slope',
'maximum',
'median',
'median_absolute_deviation',
'minimum',
'percent_amplitude',
'percent_beyond_1_std',
'percent_close_to_median',
'percent_difference_flux_percentile',
'period_fast',
'qso_log_chi2_qsonu',
'qso_log_chi2nuNULL_chi2nu',
'skew',
'std',
'stetson_j',
'stetson_k',
'weighted_average',
'fold2P_slope_10percentile',
'fold2P_slope_90percentile',
'freq1_amplitude1',
'freq1_amplitude2',
'freq1_amplitude3',
'freq1_amplitude4',
'freq1_freq',
'freq1_lambda',
'freq1_rel_phase2',
'freq1_rel_phase3',
'freq1_rel_phase4',
'freq1_signif',
'freq2_amplitude1',
'freq2_amplitude2',
'freq2_amplitude3',
'freq2_amplitude4',
'freq2_freq',
'freq2_rel_phase2',
'freq2_rel_phase3',
'freq2_rel_phase4',
'freq3_amplitude1',
'freq3_amplitude2',
'freq3_amplitude3',
'freq3_amplitude4',
'freq3_freq',
'freq3_rel_phase2',
'freq3_rel_phase3',
'freq3_rel_phase4',
'freq_amplitude_ratio_21',
'freq_amplitude_ratio_31',
'freq_frequency_ratio_21',
'freq_frequency_ratio_31',
'freq_model_max_delta_mags',
'freq_model_min_delta_mags',
'freq_model_phi1_phi2',
'freq_n_alias',
'freq_signif_ratio_21',
'freq_signif_ratio_31',
'freq_varrat',
'freq_y_offset',
'linear_trend',
'medperc90_2p_p',
'p2p_scatter_2praw',
'p2p_scatter_over_mad',
'p2p_scatter_pfold_over_mad',
'p2p_ssqr_diff_over_var',
'scatter_res_raw'
]
string = "insert into " + tName + values (
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    """
    cur.execute("select oid from {:}".format(tName))
check = cur.fetchall()
for oid in np.unique(lc['oid']):
if (oid not in check):
mask = np.logical_and(lc['oid'] == oid, lc['mag_autocorr'] > 0)
fset = featurize.featurize_time_series(lc[mask]['obsmjd'], lc[mask]['mag_autocorr'], lc[mask]['magerr_auto'],
meta_features = {'oid': str(oid)}, features_to_use = feats_to_use)
cur.execute(string, fset.get_values()[0])
else:
print('Database already contains a ',oid)
conn.commit()
cur.execute("delete from feats2")
cur.fetchall()
#curves = {}
for i in range(0,18849):
loc = 'curves_oid{:_>17}.tbl'.format(objects['oid'][i])
cmd = 'curl -F catalog=ptf_lightcurves -F spatial=None -F constraints=' + '"(oid={:})"'.format(objects['oid'][i]) + ' -F outfmt=1 -F selcols=oid,obsmjd,mag_autocorr,magerr_auto,fid,ra,dec "http://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-query?" -o curves_oid{:_>17}.tbl'.format(objects['oid'][i])
subprocess.call(cmd, shell = True)
print(i)
#curves = tbl.read(loc, format = 'ipac')
#saveFeat(curves, 'feats2', cur, conn)
#%run '/home/nke2/NUREU17/LSST/VariableStarClassification/saveFeat.py' (curves, 'feats2', cur, conn)
#curves[i] = tbl.read('curves_oid{:_>17}.tbl'.format(objects['oid'][i]), format = 'ipac')
#curves = {}
cur.execute("select oid from feats2")
where = cur.fetchall()
for i in range(0, 18849):
if (objects['oid'][i] not in where):
try:
curves = tbl.read('curves_oid{:_>17}.tbl'.format(objects['oid'][i]), format = 'ipac')
print(objects['oid'][i], i)
saveFeat(curves, 'feats2', cur, conn)
except:
cmd = 'curl -F catalog=ptf_lightcurves -F spatial=None -F constraints=' + '"(oid={:})"'.format(objects['oid'][i]) + ' -F outfmt=1 -F selcols=oid,obsmjd,mag_autocorr,magerr_auto,fid,ra,dec "http://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-query?" -o curves_oid{:_>17}.tbl'.format(objects['oid'][i])
subprocess.call(cmd, shell = True)
curves = tbl.read('curves_oid{:_>17}.tbl'.format(objects['oid'][i]), format = 'ipac')
print('queried, ', objects['oid'][i], i)
saveFeat(curves, 'feats2', cur, conn)
130122070001802
np.where(objects['oid']==35782020008798)
cur.execute("select * from feats2 where oid=35782020008798")
cur.fetchall()
fig = plt.figure(figsize = (12, 20))
for i in range(0,3):
ax = fig.add_subplot(3,1,i+1)
masked = np.where(curves[i]['mag_autocorr'] > 0 )
ax.errorbar(curves[i]['obsmjd'][masked], curves[i]['mag_autocorr'][masked], yerr = curves[i]['magerr_auto'][masked], fmt = 'bo')
ax.set_xlabel('Time(days)')
ax.set_ylabel('Observed magnitude')
plt.tight_layout()
feats_to_use = ["freq1_freq"]
want = np.where(curves[0]['mag_autocorr'] > 0)
fset = featurize.featurize_time_series(times = curves[0]['obsmjd'][want], values = curves[0]['mag_autocorr'][want], errors = curves[0]['magerr_auto'][want], features_to_use = feats_to_use)
per1 = fset['freq1_freq'][0][0]
print(per1)
plt.errorbar((curves[0]['obsmjd'][want]%per1)/per1, curves[0]['mag_autocorr'][want], yerr = curves[0]['magerr_auto'][want], fmt = 'bo')
plt.xlabel('Phase')
plt.ylabel('Observed magnitude')
feats_to_use = ["freq1_freq"]
want = np.where(curves[1]['mag_autocorr'] > 0)
fset = featurize.featurize_time_series(times = curves[1]['obsmjd'][want], values = curves[1]['mag_autocorr'][want], errors = curves[1]['magerr_auto'][want], features_to_use = feats_to_use)
per1 = fset['freq1_freq'][0][0]
print(per1)
plt.errorbar((curves[1]['obsmjd'][want]%per1)/per1, curves[1]['mag_autocorr'][want], yerr = curves[1]['magerr_auto'][want], fmt = 'bo')
plt.xlabel('Phase')
plt.ylabel('Observed magnitude')
feats_to_use = ["freq1_freq"]
want = np.where(curves[2]['mag_autocorr'] > 0)
fset = featurize.featurize_time_series(times = curves[2]['obsmjd'][want], values = curves[2]['mag_autocorr'][want], errors = curves[2]['magerr_auto'][want], features_to_use = feats_to_use)
per1 = fset['freq1_freq'][0][0]
print(per1)
plt.errorbar((curves[2]['obsmjd'][want]%per1)/per1, curves[2]['mag_autocorr'][want], yerr = curves[2]['magerr_auto'][want], fmt = 'bo')
plt.xlabel('Phase')
plt.ylabel('Observed magnitude')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Query for the given objects
Step2: I'm not sure why subprocess.call doesn't seem to work for this specific case; however, the urllib code below does the job.
Step6: Read that data file in
Step7: If reading in for the first time, use this cell. It will loop over each oid within the objects queried above and execute queries for their source lightcurves; additionally, it reads each returned data file into a dict to reference later.
Step10: Same as above; however, if the sources have already been queried, this just reads in the files to save some time.
Step11: In both of the above cases, the loop has been shortened so that the script doesn't exceed memory or storage limits (and so I could interact with it much quicker).
Step12: Phase folded curves
|
13,263
|
<ASSISTANT_TASK:>
Python Code:
def mysum(a, b):
return a + b
abs(-3.2)
help("abs")
def mysum(a, b):
    """This is the addition I defined.

    It takes two numbers through the parameters a and b and returns their sum.
    """
return a + b
help(mysum)
x = 2
y = 3
z = mysum(x,y)
print(z)
no_return = print(3)
print(no_return)
type(no_return)
def print42():
print(42)
def return42():
return 42
b = return42()
b
print(b)
a = print42()
a
print(a)
import math
math.pi
math.sin(math.pi / 2)
import math as m
m.pi
m.sin(m.pi / 2)
from math import sin, pi
pi
sin(pi/2)
math.cos(10)
from math import *
exp(1)
help("math")
help("math.sqrt")
def shopping(shopping_file):
file = open(shopping_file, 'r')
buy_list = file.read()
file.close()
return buy_list
print(shopping("data/shopping_list.txt"))
type(shopping("data/shopping_list.txt"))
buy_list = shopping("data/shopping_list.txt").split("\n")
print(buy_list)
for item in buy_list:
print("===")
print(item)
for i in range(len(buy_list)):
print("===")
print(buy_list[i])
for item in buy_list:
each_item = item.split()
print(each_item[0], each_item[1], "개의 가격은", each_item[2], "원 입니다.")
total = 0
for item in buy_list:
total = total + int(item.split()[2])
print("총 비용은", total, "입니다.")
shopping("shopping_list.txt")
def shopping(shopping_file):
try:
file = open(shopping_file, 'r')
buy_list = file.read()
file.close()
return buy_list
except FileNotFoundError:
raise FileNotFoundError("해당 파일의 이름 또는 경로를 확인하세요.")
shopping("shopping_list.txt")
def shopping_amount(shopping_file):
buy_list = shopping(shopping_file).split('\n')
total = 0
for item in buy_list:
total = total + int(item.split()[2])
return total
shopping_amount("data/shopping_list.txt")
def shopping_item(shopping_file, items):
buy_list = shopping(shopping_file).split('\n')
total = 0
if 'bread' in items.lower():
total += int(buy_list[0].split()[2])
if 'tomato' in items.lower():
total += int(buy_list[1].split()[2])
if 'cola' in items.lower():
total += int(buy_list[2].split()[2])
return total
item = input("Bread, Tomato, Cola 중 살 품목을 적어주세요: ")
print(shopping_item("data/shopping_list.txt", item))
def shopping_item(shopping_file, items):
buy_list = shopping(shopping_file).split('\n')
price_list = []
if 'bread' in items.lower():
price_list.append(int(buy_list[0].split()[2]))
if 'tomato' in items.lower():
price_list.append(int(buy_list[1].split()[2]))
if 'cola' in items.lower():
price_list.append(int(buy_list[2].split()[2]))
return sum(price_list)
item = input("Bread, Tomato, Cola 중 살 품목을 적어주세요: ")
print(shopping_item("data/shopping_list.txt", item))
def shopping_amount_n(shopping_file, n):
buy_list = shopping(shopping_file).split('\n')
total = 0
for i in range(len(buy_list)):
total += int(buy_list[i].split()[2]) * n[i]
return total
while True:
try:
n = list(map(int, input("Bread, Tomato, Cola을 구매할 수량을 차례대로 입력하세요: ").split(', ')))
break
except ValueError:
print("정수로 입력하세요.")
print(shopping_amount_n("data/shopping_list.txt", n))
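# Not part of the original lesson: the same function can also be called directly with a list
# of quantities instead of reading them from input(), assuming the shopping list still has
# three items (bread, tomato, cola).
print(shopping_amount_n("data/shopping_list.txt", [2, 1, 3]))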
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 문서화 문자열(docstring) 활용
Step3: 보이는 내용을 설명하면 다음과 같다.
Step4: mysum 함수에 대해 알아보자.
Step5: mysum 함수를 정의할 때 추가한 문서화 문자열이 그대로 출력됨을 확인할 수 있다.
Step6: 주의
Step7: print() 함수의 리턴값은 None 이다.
Step8: 파이썬에서 다루는 값은 모두 자료형을 갖고 있으며, None의 자료형은 NoneType이라 부른다.
Step9: 주의
Step10: 모듈(Module)
Step11: 모듈 임포트 방법 2
Step12: 모듈 임포트 방법 3
Step13: 이 경우 math 모듈에 포함된 다른 코드는 사용할 수 없다.
Step14: 모듈 임포트 방법 4
Step15: math 모듈 내용 확인하기
Step16: 예제
Step17: 앞서 abs 함수에서 살펴 보았듯이 2번줄 ~ 4번 줄 내용은 sqrt 함수를 호출하는 방법과
Step18: 위 코드에서 파일 내용을 읽어드리는 부분은 다음과 같다.
Step19: 리턴값의 자료형은 유니코드 문자열이다.
Step20: 이제 문자열의 split 메소드를 활용해서, 쇼핑 목록을 리스트로 작성해보자.
Step21: buy_list에는 이제 각 줄의 내용을 항목으로 갖는 리스트가 할당된다.
Step22: 리스트에 포함된 각각의 항목에 대해 for 반복문을 실행할 수 있다.
Step23: 앞서 배운 range 함수와 인덱스를 활용하여 동일한 일을 수행할 수도 있다.
Step24: 이제 시장을 볼 때 필요한 총 비용이 얼마인지 계산할 수 있다.
Step25: 이제 총 비용을 계산할 수 있다.
Step26: 연습
Step27: 오류가 발생하면서 실행이 멈춘 이유를 설명하기는 하지만 부가 설명이 필요한 경우에는 아래와 같이
Step28: 이제 다시 shopping_list.txt 파일의 경로를 틀리게 사용하면 다음과 같이 예외 처리가 실행된다.
Step29: 연습
Step30: 연습
Step31: 견본답안 2
Step32: 연습
|
13,264
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import ensemble

df = pd.read_csv((
    "https://raw.githubusercontent.com/Thinkful-Ed/data-201-resources/"
    "master/ESS_practice_data/ESSdata_Thinkful.csv")).dropna()
# Define outcome and predictors.
# Set our outcome to 0 and 1.
y = df['partner'] - 1
X = df.loc[:, ~df.columns.isin(['partner', 'cntry', 'idno'])]
# Make the categorical variable 'country' into dummies.
X = pd.concat([X, pd.get_dummies(df['cntry'])], axis=1)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.10)
# As originally designed, this dataset trains on non-Estonians and tests on Estonians in an imbalanced way.
# We may have better results doing a cross-validation.
# Create training and test sets.
#offset = int(X.shape[0] * 0.9)
# Put 90% of the data in the training set.
#X_train, y_train = X[:offset], y[:offset]
# And put 10% in the test set.
#X_test, y_test = X[offset:], y[offset:]
# We'll use 1000 estimators, trees of depth 3, an exponential loss, and a 0.01 learning rate.
params = {'n_estimators': 1000,
'max_depth': 3,
'loss': 'exponential',
'learning_rate' : 0.01
}
# Initialize and fit the model.
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
predict_train = clf.predict(X_train)
predict_test = clf.predict(X_test)
# Accuracy tables.
table_train = pd.crosstab(y_train, predict_train, margins=True)
table_test = pd.crosstab(y_test, predict_test, margins=True)
train_tI_errors = table_train.loc[0.0,1.0] / table_train.loc['All','All']
train_tII_errors = table_train.loc[1.0,0.0] / table_train.loc['All','All']
test_tI_errors = table_test.loc[0.0,1.0]/table_test.loc['All','All']
test_tII_errors = table_test.loc[1.0,0.0]/table_test.loc['All','All']
print((
'Training set accuracy:\n'
'Percent Type I errors: {}\n'
'Percent Type II errors: {}\n\n'
'Test set accuracy:\n'
'Percent Type I errors: {}\n'
'Percent Type II errors: {}'
).format(train_tI_errors, train_tII_errors, test_tI_errors, test_tII_errors))
feature_importance = clf.feature_importances_
# Make importances relative to max importance.
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, X.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
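# Not in the original notebook: a quick tabular view of the same information, listing the
# ten most important features by relative importance.
top = sorted(zip(X.columns, feature_importance), key=lambda t: t[1], reverse=True)[:10]
for name, score in top:
    print('{:<25s} {:6.1f}'.format(name, score))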
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Since we're now working with a binary outcome, we've switched to a classifier. Now our loss function can't be the residuals. Our options are "deviance", or "exponential". Deviance is used for logistic regression, and we'll try that here.
Step2: Unlike decision trees, gradient boost solutions are not terribly easy to interpret on the surface. But they aren't quite a black box. We can get a measure of how important various features are by counting how many times a feature is used over the course of many decision trees.
|
13,265
|
<ASSISTANT_TASK:>
Python Code:
# imports assumed by the original notebook (it uses librosa, IPython display, numpy and matplotlib)
import numpy
import matplotlib.pyplot as plt
import librosa
import librosa.display
import IPython.display as ipd

x, sr = librosa.load('audio/c_strum.wav')
ipd.Audio(x, rate=sr)
plt.figure(figsize=(14, 5))
librosa.display.waveplot(x, sr)
# Because the autocorrelation produces a symmetric signal, we only care about the "right half".
r = numpy.correlate(x, x, mode='full')[len(x)-1:]
print(x.shape, r.shape)
plt.figure(figsize=(14, 5))
plt.plot(r[:10000])
plt.xlabel('Lag (samples)')
plt.xlim(0, 10000)
r = librosa.autocorrelate(x, max_size=10000)
print(r.shape)
plt.figure(figsize=(14, 5))
plt.plot(r)
plt.xlabel('Lag (samples)')
plt.xlim(0, 10000)
x, sr = librosa.load('audio/oboe_c6.wav')
ipd.Audio(x, rate=sr)
r = librosa.autocorrelate(x, max_size=5000)
plt.figure(figsize=(14, 5))
plt.plot(r[:200])
midi_hi = 120.0
midi_lo = 12.0
f_hi = librosa.midi_to_hz(midi_hi)
f_lo = librosa.midi_to_hz(midi_lo)
t_lo = sr/f_hi
t_hi = sr/f_lo
print(f_lo, f_hi)
print(t_lo, t_hi)
r[:int(t_lo)] = 0
r[int(t_hi):] = 0
plt.figure(figsize=(14, 5))
plt.plot(r[:1400])
t_max = r.argmax()
print(t_max)
float(sr)/t_max
librosa.midi_to_hz(84)
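# Not part of the original notebook: round the detected frequency to the nearest MIDI note
# as a quick sanity check against C6 (MIDI note 84).
f0 = float(sr) / t_max
print(librosa.hz_to_midi(f0))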
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: numpy.correlate
Step2: Plot the autocorrelation
Step3: librosa.autocorrelate
Step4: librosa.autocorrelate conveniently only keeps one half of the autocorrelation function, since the autocorrelation is symmetric. Also, the max_size parameter prevents unnecessary calculations.
Step5: Compute and plot the autocorrelation
Step6: The autocorrelation always has a maximum at zero, i.e. zero lag. We want to identify the maximum outside of the peak centered at zero. Therefore, we might choose only to search within a range of reasonable pitches
Step7: Set invalid pitch candidates to zero
Step8: Find the location of the maximum
Step9: Finally, estimate the pitch in Hertz
Step10: Indeed, that is very close to the true frequency of C6
|
13,266
|
<ASSISTANT_TASK:>
Python Code:
# General imports
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
# Figure config
colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
LEGEND_SIZE = 15
TITLE_SIZE = 25
AXIS_SIZE = 15
FIGURE_SIZE = (12, 8)
# for reproducibility
np.random.seed(42)
# SEIR model and Gillespie simulation
import Emukit_task_seir_model as seir
# GPy for the model
import GPy
# Emukit model wrapper
from emukit.model_wrappers.gpy_quadrature_wrappers import RBFGPy, BaseGaussianProcessGPy
from emukit.model_wrappers import GPyModelWrapper
# Emukit core
from emukit.core import ParameterSpace, ContinuousParameter
# Emukit quadrature
from emukit.quadrature.kernels.integral_bounds import IntegralBounds
from emukit.quadrature.kernels import QuadratureRBF
from emukit.quadrature.methods import VanillaBayesianQuadrature
from emukit.quadrature.loop import VanillaBayesianQuadratureLoop
# Emukit experimental design
from emukit.experimental_design import ExperimentalDesignLoop
population_size = 100 # size of population N
alpha_init = 10. # infection rate/recovery rate
beta = 4. # incubation rate/recovery rate
t_end = 10. # end time of simulation
t_eval = np.linspace(0., t_end, 200) # fixed time steps for simulation
num_gil = 1000  # number of gillespie runs; 1000 will give roughly noise-free observations
# Set up the gillespie model
seir_model = seir.SEIR(population_size, alpha_init, beta)
seir_gil = seir.SEIRGillespie(seir_model)
# Prior over alpha. This prior will be integrated against in Task 1 and 2
alpha_prior = seir.GammaPrior(5., 1., 5.)
# computes expected time of occurrence and height of the infection peak for a given alpha.
meanmax = seir.MeanMaxInfectionGillespie(seir_gil, num_gil, t_end, alpha_prior)
# define the integrand
f_height_of_peak_weighted = seir.height_of_peak_weighted(meanmax)
# define integral bounds
lb = 1. # lower integral bound
ub = 80. # upper integral bound
# number of initial points
N_init_task1 = 3
# initial training points: locations X_init sampled uniformly from [lower_bound, upper_bound].
X_init_task1 = np.random.rand(N_init_task1, 1)*(ub-lb) + lb
Y_init_task1 = f_height_of_peak_weighted(X_init_task1)
# make your GP model in GPy.
gpy_model_task1 = GPy.models.GPRegression(X=X_init_task1, Y=Y_init_task1,
kernel=GPy.kern.RBF(input_dim=X_init_task1.shape[1],
lengthscale=10.0, variance=1.0))
# create emukit quadrature GP model
emukit_qrbf = QuadratureRBF(rbf_kernel=RBFGPy(gpy_model_task1.kern),
integral_bounds=[(lb, ub)])
emukit_model = BaseGaussianProcessGPy(kern=emukit_qrbf, gpy_model=gpy_model_task1)
# choose vanilla Bayesian quadrature method and make loop
emukit_method = VanillaBayesianQuadrature(base_gp=emukit_model)
emukit_loop = VanillaBayesianQuadratureLoop(model=emukit_method)
# run the loop for a number of iterations and collect result
num_iter = 5
emukit_loop.run_loop(user_function=f_height_of_peak_weighted, stopping_condition=num_iter)
height_of_peak_estimate, height_of_peak_variance = emukit_loop.model.integrate()
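# A quick sanity check that is not in the original notebook: integrate() returns a Gaussian
# belief over the integral, so an approximate 95% credible interval is mean +/- 2 std.
ci_lower = height_of_peak_estimate - 2 * np.sqrt(height_of_peak_variance)
ci_upper = height_of_peak_estimate + 2 * np.sqrt(height_of_peak_variance)
print('approximate 95% credible interval for the expected peak height:', (ci_lower, ci_upper))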
x_plot = np.linspace(lb, ub, 100)[:, None]
mu_plot, var_plot = emukit_loop.model.predict(x_plot)
X_plot = emukit_loop.loop_state.X
Y_plot = emukit_loop.loop_state.Y
plt.figure(figsize=FIGURE_SIZE)
plt.plot(X_plot, Y_plot, "ro", markersize=10, label="Observations")
plt.plot(X_init_task1, Y_init_task1, "bo", markersize=10, label="Initial observations")
plt.plot(x_plot, mu_plot, "C0", label="Model")
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - np.sqrt(var_plot)[:, 0], color="C0", alpha=0.6)
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + 2 * np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - 2 * np.sqrt(var_plot)[:, 0], color="C0", alpha=0.4)
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + 3 * np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - 3 * np.sqrt(var_plot)[:, 0], color="C0", alpha=0.2)
plt.legend(loc=1, prop={'size': LEGEND_SIZE})
plt.xlabel(r"ratio of infection rate and recovery rate")
plt.ylabel(r"weighted height of infection peak")
plt.grid(True)
plt.xlim(lb, ub)
plt.show()
initial_integral_mean = height_of_peak_estimate
initial_integral_variance = height_of_peak_variance
x_plot_integral = np.linspace(initial_integral_mean-5*np.sqrt(initial_integral_variance),
initial_integral_mean+5*np.sqrt(initial_integral_variance), 200)
y_plot_integral_initial = 1/np.sqrt(initial_integral_variance * 2 * np.pi) * \
np.exp( - (x_plot_integral - initial_integral_mean)**2 / (2 * initial_integral_variance) )
plt.figure(figsize=FIGURE_SIZE)
plt.plot(x_plot_integral, y_plot_integral_initial, "k", label="integral density")
plt.axvline(initial_integral_mean, color="red", label="integral estimate", \
linestyle="--")
plt.legend(loc=2, prop={'size': LEGEND_SIZE})
plt.xlabel(r"expected height of infection peak in number of individuals")
plt.ylabel(r"pdf of expected height of infection peak in number of individuals")
plt.grid(True)
plt.xlim(np.min(x_plot_integral), np.max(x_plot_integral))
plt.show()
print('=== TASK 1: Result ===')
print()
print('The estimated expected height of the infection peak is: ', round(height_of_peak_estimate), 'individuals,')
print('with a credible interval: ', int(np.ceil(2*np.sqrt(height_of_peak_variance))), 'individuals.')
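# A rough cross-check, not in the original notebook (a sketch): for vanilla BQ the integral mean
# equals the integral of the GP mean, so a fine trapezoidal rule over [lb, ub] should land close
# to the estimate reported above.
x_check = np.linspace(lb, ub, 400)[:, None]
mu_check, _ = emukit_loop.model.predict(x_check)
print(height_of_peak_estimate, np.trapz(mu_check[:, 0], x_check[:, 0]))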
# define the integrand
f_time_of_peak_weighted = seir.time_of_peak_weighted(meanmax)
# number of initial points
N_init_task2 = 3
# initial training points: locations X_init sampled uniformly from [lower_bound, upper_bound].
X_init_task2 = np.random.rand(N_init_task2, 1)*(ub-lb) + lb
Y_init_task2 = f_time_of_peak_weighted(X_init_task2)
# make your GP model in GPy.
gpy_model_task2 = GPy.models.GPRegression(X=X_init_task2, Y=Y_init_task2,
kernel=GPy.kern.RBF(input_dim=X_init_task2.shape[1],
lengthscale=10.0, variance=1.0))
# create emukit quadrature GP model
emukit_qrbf = QuadratureRBF(rbf_kernel=RBFGPy(gpy_model_task2.kern),
integral_bounds=[(lb, ub)])
emukit_model = BaseGaussianProcessGPy(kern=emukit_qrbf, gpy_model=gpy_model_task2)
# choose vanilla Bayesian quadrature method and make loop
emukit_method = VanillaBayesianQuadrature(base_gp=emukit_model)
emukit_loop = VanillaBayesianQuadratureLoop(model=emukit_method)
# run the loop for a number of iterations and collect result
num_iter = 5
emukit_loop.run_loop(user_function=f_time_of_peak_weighted, stopping_condition=num_iter)
time_of_peak_estimate, time_of_peak_variance = emukit_loop.model.integrate()
x_plot = np.linspace(lb, ub, 100)[:, None]
mu_plot, var_plot = emukit_loop.model.predict(x_plot)
X_plot = emukit_loop.loop_state.X
Y_plot = emukit_loop.loop_state.Y
plt.figure(figsize=(12, 8))
plt.plot(X_plot, Y_plot, "ro", markersize=10, label="Observations")
plt.plot(X_init_task2, Y_init_task2, "bo", markersize=10, label="Initial observations")
plt.plot(x_plot, mu_plot, "C0", label="Model")
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - np.sqrt(var_plot)[:, 0], color="C0", alpha=0.6)
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + 2 * np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - 2 * np.sqrt(var_plot)[:, 0], color="C0", alpha=0.4)
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + 3 * np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - 3 * np.sqrt(var_plot)[:, 0], color="C0", alpha=0.2)
plt.legend(loc=1, prop={'size': LEGEND_SIZE})
plt.xlabel(r"ratio of infection rate and recovery rate")
plt.ylabel(r"weighted time-occurance of infection peak")
plt.grid(True)
plt.xlim(lb, ub)
plt.show()
initial_integral_mean = time_of_peak_estimate
initial_integral_variance = time_of_peak_variance
x_plot_integral = np.linspace(initial_integral_mean-5*np.sqrt(initial_integral_variance),
initial_integral_mean+5*np.sqrt(initial_integral_variance), 200)
y_plot_integral_initial = 1/np.sqrt(initial_integral_variance * 2 * np.pi) * \
np.exp( - (x_plot_integral - initial_integral_mean)**2 / (2 * initial_integral_variance) )
plt.figure(figsize=FIGURE_SIZE)
plt.plot(x_plot_integral, y_plot_integral_initial, "k", label="integral density")
plt.axvline(initial_integral_mean, color="red", label="integral estimate", \
linestyle="--")
plt.legend(loc=2, prop={'size': LEGEND_SIZE})
plt.xlabel(r"expected time-occurance of infection peak")
plt.ylabel(r"pdf of expected time-occurance of infection peak in time units")
plt.grid(True)
plt.xlim(np.min(x_plot_integral), np.max(x_plot_integral))
plt.show()
print('=== TASK 2: Result ===')
print()
print('The estimated expected time of the infection peak is: ', round(time_of_peak_estimate, 2), 'time units.')
print('with a credible interval: ', round(2*np.sqrt(time_of_peak_variance), 2), 'time units.')
# define the user function
f_height_of_peak = seir.height_of_peak(meanmax)
# this is the infection rate ratio we are interested in at a later point. We can define it here
# already since this is a tutorial. It will not be used by Emukit to train the model.
alpha_unobserved = np.array([[5.]])
# number of initial points
N_init_task3 = 3
# initial training points: locations X_init sampled uniformly from [lower_bound, upper_bound].
X_init_task3 = np.random.rand(N_init_task3, 1)*(ub-lb) + lb
Y_init_task3 = f_height_of_peak(X_init_task3)
# make your GP model in GPy.
gpy_model_task3 = GPy.models.GPRegression(X=X_init_task3, Y=Y_init_task3,
kernel=GPy.kern.RBF(input_dim=X_init_task3.shape[1],
lengthscale=10.0, variance=1.0))
gpy_model_task3.Gaussian_noise.constrain_fixed(1.e-10)
# create emukit GP model
model_emukit = GPyModelWrapper(gpy_model=gpy_model_task3)
# Create the parameters space
parameter_space_emukit = ParameterSpace([ContinuousParameter('alpha', lb, ub)])
# Create the experimental design object
loop_emukit = ExperimentalDesignLoop(space=parameter_space_emukit, model=model_emukit)
# run the loop for a number of iterations and collect predictions
num_iter = 5
loop_emukit.run_loop(user_function=f_height_of_peak, stopping_condition=num_iter)
predicted_height_of_peak_mean, predicted_height_of_peak_variance \
= loop_emukit.model.predict(alpha_unobserved)
x_plot = np.linspace(lb, ub, 100)[:, None]
mu_plot, var_plot = loop_emukit.model.predict(x_plot)
X_plot = loop_emukit.loop_state.X
Y_plot = loop_emukit.loop_state.Y
plt.figure(figsize=FIGURE_SIZE)
plt.plot(X_plot, Y_plot, "ro", markersize=10, label="Observations")
plt.plot(X_init_task3, Y_init_task3, "bo", markersize=10, label="Initial observations")
plt.plot(x_plot, mu_plot, "C0", label="Model")
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - np.sqrt(var_plot)[:, 0], color="C0", alpha=0.6)
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + 2 * np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - 2 * np.sqrt(var_plot)[:, 0], color="C0", alpha=0.4)
plt.fill_between(x_plot[:, 0],
mu_plot[:, 0] + 3 * np.sqrt(var_plot)[:, 0],
mu_plot[:, 0] - 3 * np.sqrt(var_plot)[:, 0], color="C0", alpha=0.2)
plt.axvline(alpha_unobserved[0, 0], color="red", label="location for prediction", \
linestyle="--")
plt.legend(loc=4, prop={'size': LEGEND_SIZE})
plt.xlabel(r"ratio of infection rate and recovery rate")
plt.ylabel(r"height of infection peak in number of individuals")
plt.grid(True)
plt.xlim(lb, ub)
plt.show()
print()
print('=== TASK 3 Result ===')
print('The estimated height of the infection peak at the unobserved gillespie run with alpha =',
round(alpha_unobserved[0, 0]),' is: ', int(round(predicted_height_of_peak_mean[0, 0])),
'individuals, with a credible interval: ', int(np.ceil(2*np.sqrt(predicted_height_of_peak_variance[0, 0]))), 'individuals.')
print('=== TASK 1 ===')
print('The estimated expected height of the infection peak is: ', round(height_of_peak_estimate), 'individuals,')
print('with a credible interval: ', np.ceil(2*np.sqrt(height_of_peak_variance)), 'individuals.')
print()
print('=== TASK 2 ===')
print('The estimated expected time of the infection peak is: ', round(time_of_peak_estimate, 2), 'time units.')
print('with a credible interval: ', round(2*np.sqrt(time_of_peak_variance), 2), 'time units.')
print()
print('=== TASK 3 ===')
print('The estimated height of the infection peak at the unobserved gillespie run with alpha =',
round(alpha_unobserved[0, 0]),' is: ', int(round(predicted_height_of_peak_mean[0, 0])),
'individuals, with a credible interval: ', int(np.ceil(2*np.sqrt(predicted_height_of_peak_variance[0, 0]))), 'individuals.')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The SEIR epidemic model
Step2: Load SEIR model
Step3: <a id='section-task1'></a>
Step4: Get initial observations
Step5: Create Emukit surrogate model (the emulator) for the integrand function
Step6: Print and plot result
Step7: <a id='section-task2'></a>
Step8: Print and plot result
Step9: <a id='section-task3'></a>
Step10: Print and plot result
Step11: <a id='conclusion'></a>
|
13,267
|
<ASSISTANT_TASK:>
Python Code:
import geopandas as gpd
denue = gpd.read_file("datos/DENUE_INEGI_09_.shp")
denue.head()
denue[["codigo_act", "nom_estab"]].head()
import pandas as pd
import numpy as np
df = pd.DataFrame(np.random.randn(3, 3))
df
def square(x):
return x**2
df[[0]].apply(square)
df['squared'] = df[[0]].apply(square)
df
def clasifica(codigo):
comercio = ['461', '462', '463', '464', '465', '466']
oficinas = ['51', '521', '523', '524', '5312', '5313', '541', '55']
ocio = ['711121', '71212', '7132', '7139', '7211', '7224', '7225']
usos = {'comercio': comercio, 'oficinas':oficinas, 'ocio': ocio}
for actividad, claves in usos.items():
for c in claves:
if str(codigo).startswith(c):
return actividad
clases = denue['codigo_act'].apply(clasifica)
denue['clase'] = clases
denue[['codigo_act', 'clase']][denue['clase']== 'oficinas'].head()
denue.loc[denue['clase'].isnull()].head()
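# Equivalent selection (a small sketch): plain boolean indexing returns the same rows as the
# .loc call above; pandas offers several interchangeable ways to filter.
denue[denue['clase'].isnull()].head()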
denue = denue.loc[denue['clase'].notnull()]
denue.head()
def concatena_claves(x):
return '{}{}{}{}'.format (x['cve_ent'], x['cve_mun'], x['cve_loc'], x['ageb'])
denue.apply(concatena_claves, axis=1).head()
print('{} {} {}'.format('a', 'b', 'c'))
denue['cve_geo'] = denue.apply(concatena_claves, axis=1)
denue[['cve_geo','clase']].head()
variables = denue[['cve_geo','clase']]
variables.head()
pd.get_dummies(variables, columns=['clase']).head(20)
variables = pd.get_dummies(variables, columns=['clase'])
por_ageb = variables.groupby(['cve_geo']).sum()
por_ageb.head()
agebs = gpd.read_file("datos/ageb_urb.shp")
agebs.head()
agebs = agebs[['CVEGEO', 'geometry']]
agebs.head()
usos_suelo = agebs.merge(por_ageb, left_on='CVEGEO', right_index=True, how='inner')
usos_suelo.head()
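# An equivalent join without right_index (a small sketch): turn the index of por_ageb back into
# a regular column with reset_index and merge on two named columns instead.
usos_suelo_alt = agebs.merge(por_ageb.reset_index(), left_on='CVEGEO', right_on='cve_geo', how='inner')
usos_suelo_alt.head()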
%matplotlib inline
usos_suelo.plot(column='clase_comercio', figsize=(20,10))
usos_suelo.plot(column='clase_comercio', figsize=(20,10), cmap='OrRd',scheme='quantiles')
usos_suelo.plot(column='clase_comercio', figsize=(20,10), cmap='OrRd',scheme='fisher_jenks')
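# One more variation (a sketch; like the schemes above it relies on the PySal-backed classifiers):
# the number of classes can be set explicitly with k, and any of the counted columns can be mapped.
usos_suelo.plot(column='clase_oficinas', figsize=(20,10), cmap='OrRd', scheme='quantiles', k=7)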
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As you can see, the file has 42 columns; the ones we are interested in right now are the ones that describe the activity of each unit
Step2: The activity code corresponds to the SCIAN classification and is used to classify economic activities. For this workshop, we are going to group different economic activities into three land-use categories
Step3: The first thing we did was import two libraries
Step4: As you can see, the result is a series (a column) containing the result of applying our function to each row of column 0. We can easily add this new column to our original data
Step5: Quick exercise
Step6: The function looks quite complicated, but if we think it through calmly it is actually quite simple
Step7: Quick exercise
Step8: Notice how we are using loc here to select rows that meet a condition. Pandas has several ways of selecting (we have already used at least two here). Here you can see the official documentation of the methods for selecting rows in Pandas; this other link has some extra examples.
Step9: Quick exercise
Step10: Notice that here we are introducing the string formatting operator to fill a string with the values we get from the columns. Here is a quick example so you can see how format works
Step11: So, now that we know how to compute our unique AGEB identifier, let's add it as a column
Step12: Now that we have an AGEB identifier for each point, we can aggregate by AGEB and count how many occurrences of each of our categories there are in each AGEB, just as we would with a SQL group by. The problem in this case is that the classes are values along a single column, so we have no direct way to aggregate the number of occurrences of each land use. Therefore, before aggregating by AGEB we need to promote the values of each class to columns, that is, add 3 columns to our database
Step13: Notice that what we want to do is encode the clase variable as three dummy variables, that is, convert the column into three indicator columns. Pandas provides the get_dummies method to do this
Step14: Now we have one column per class that tells us whether or not each point belongs to that class. Then it is just a matter of grouping by our AGEB identifier and summing the indicator columns
Step15: Finally we have the number of occurrences of each land use for each AGEB. Now we just need to join them with the geometry of the AGEBs and our data will be ready!
Step16: Since we are only going to use the geometry columns and the AGEB identifier, let's keep the GeoDataFrame with only those columns
Step17: Now we are going to join the two databases
Step18: Notice that the merge works exactly like a SQL join; the only peculiarity in this case is the use of right_index=True. This is necessary because, due to the way we built the por_ageb DataFrame, the AGEB key is the index of the table and not a column (in a way, the index is the name of each row; it looks a lot like a column but has no name).
Step19: As you can see, the map does not show big differences; that is because, by default, GeoPandas classifies into equal intervals. If we want to highlight the differences, we can use other classification methods (this option is only available if PySal is also installed)
|
13,268
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# plots graphs within the notebook
%config InlineBackend.figure_format='svg' # render inline figures as SVG (vector) instead of PNG
from IPython.display import Image
from IPython.core.display import HTML
def header(text):
raw_html = '<h4>' + str(text) + '</h4>'
return raw_html
def box(text):
raw_html = '<div style="border:1px dotted black;padding:2em;">'+str(text)+'</div>'
return HTML(raw_html)
def nobox(text):
raw_html = '<p>'+str(text)+'</p>'
return HTML(raw_html)
def addContent(raw_html):
global htmlContent
htmlContent += raw_html
class PDF(object):
def __init__(self, pdf, size=(200,200)):
self.pdf = pdf
self.size = size
def _repr_html_(self):
return '<iframe src={0} width={1[0]} height={1[1]}></iframe>'.format(self.pdf, self.size)
def _repr_latex_(self):
return r'\includegraphics[width=1.0\textwidth]{{{0}}}'.format(self.pdf)
class ListTable(list):
    """Overridden list class which takes a 2-dimensional list of
    the form [[1,2,3],[4,5,6]], and renders an HTML Table in
    IPython Notebook.
    """
def _repr_html_(self):
html = ["<table>"]
for row in self:
html.append("<tr>")
for col in row:
html.append("<td>{0}</td>".format(col))
html.append("</tr>")
html.append("</table>")
return ''.join(html)
font = {'family' : 'serif',
'color' : 'black',
'weight' : 'normal',
'size' : 18,
}
import matplotlib.pyplot as plt
import numpy as np
Lx = 2.*np.pi
Nx = 512
u = np.zeros(Nx,dtype='float64')
du = np.zeros(Nx,dtype='float64')
ddu = np.zeros(Nx,dtype='float64')
k_0 = 2.*np.pi/Lx
dx = Lx/Nx
x = np.linspace(dx,Lx,Nx)
Nwave = 33
uwave = np.zeros((Nx,Nwave),dtype='float64')
duwave = np.zeros((Nx,Nwave),dtype='float64')
dduwave = np.zeros((Nx,Nwave),dtype='float64')
ampwave = np.random.random(Nwave)
phasewave = np.random.random(Nwave)*2*np.pi
for iwave in range(Nwave):
uwave[:,iwave] = ampwave[iwave]*np.cos(k_0*iwave*x+phasewave[iwave])
duwave[:,iwave] = -k_0*iwave*ampwave[iwave]*np.sin(k_0*iwave*x+phasewave[iwave])
dduwave[:,iwave] = -(k_0*iwave)**2*ampwave[iwave]*np.cos(k_0*iwave*x+phasewave[iwave])
u = np.sum(uwave,axis=1)
du = np.sum(duwave,axis=1)
ddu = np.sum(dduwave,axis=1)
#print(u)
plt.plot(x,u,lw=2)
plt.xlim(0,Lx)
#plt.legend(loc=3, bbox_to_anchor=[0, 1],
# ncol=3, shadow=True, fancybox=True)
plt.xlabel('$x$', fontdict = font)
plt.ylabel('$u$', fontdict = font)
plt.xticks(fontsize = 16)
plt.yticks(fontsize = 16)
plt.show()
plt.plot(x,du,lw=2)
plt.xlim(0,Lx)
#plt.legend(loc=3, bbox_to_anchor=[0, 1],
# ncol=3, shadow=True, fancybox=True)
plt.xlabel('$x$', fontdict = font)
plt.ylabel('$du/dx$', fontdict = font)
plt.xticks(fontsize = 16)
plt.yticks(fontsize = 16)
plt.show()
plt.plot(x,ddu,lw=2)
plt.xlim(0,Lx)
#plt.legend(loc=3, bbox_to_anchor=[0, 1],
# ncol=3, shadow=True, fancybox=True)
plt.xlabel('$x$', fontdict = font)
plt.ylabel('$d^2u/dx^2$', fontdict = font)
plt.xticks(fontsize = 16)
plt.yticks(fontsize = 16)
plt.show()
Nitermax = 1000
it = 0
error_threshold = 1e-6
error = np.inf
phi = np.zeros(Nx)
phi_old = np.zeros(Nx)
error_jacobi = np.zeros(Nitermax)
b = np.zeros(Nx)
#generate rhs
b[1:Nx-1] = 0.75*(u[2:Nx]-u[0:Nx-2])/dx
b[0] = 0.75*(u[1]-u[Nx-1])/dx
b[Nx-1] = 0.75*(u[0]-u[Nx-2])/dx
for it in range(Nitermax):
phi_old = np.copy(phi)
phi[1:Nx-1] = -0.25*(phi_old[0:Nx-2] + phi_old[2:Nx]) \
+b[1:Nx-1]
phi[0] = -0.25*(phi_old[1] + phi_old[Nx-1]) \
+b[0]
phi[Nx-1] = -0.25*(phi_old[Nx-2] + phi_old[0]) \
+b[Nx-1]
error_jacobi[it] = np.max(np.abs(phi-phi_old))
if (error_jacobi[it] < error_threshold): break
#print(error)
it_jacobi = it
plt.semilogy(error_jacobi[0:it_jacobi+1],lw=2,label='Jacobi')
plt.xlabel('Iterations', fontdict = font)
plt.ylabel('error', fontdict = font)
plt.xticks(fontsize = 16)
plt.yticks(fontsize = 16)
plt.legend(loc=3, bbox_to_anchor=[0, 1],
ncol=3, shadow=True, fancybox=True)
plt.show()
plt.plot(x,phi-du)
plt.show()
print(np.max(np.abs(phi-du)))
Nitermax = 1000
it = 0
error_threshold = 1e-6
phi = np.zeros(Nx)
phi_old = np.zeros(Nx)
error_jacobi = np.inf*np.ones(Nitermax)
b = np.zeros(Nx)
A = np.zeros((Nx,Nx))
for i in range(Nx):
if (i == 0):
A[i,i] = 4.
A[i,i+1] = 1.
A[i,Nx-1] = 1.
b[0] = 3./dx*(u[1] - u[Nx-1])
elif (i == Nx-1):
A[i,i-1] = 1.
A[i,i] = 4.
A[i,0] = 1. # periodic coupling back to phi[0], mirroring A[0, Nx-1] above
b[i] = 3./dx*(u[0] - u[Nx-2])
else:
A[i,i-1] = 1.
A[i,i] = 4.
A[i,i+1] = 1.
b[i] = 3./dx*(u[i+1] - u[i-1])
D = np.diag(A)
B = np.diagflat(D)
C = A - B
for it in range (Nitermax):
phi_old = np.copy(phi)
phi = (b-np.dot(C,phi_old))/D
error_jacobi[it] = np.max(np.abs(phi-phi_old))
if (error_jacobi[it] < error_threshold): break
#print(error)
it_jacobi = it
plt.semilogy(error_jacobi[0:it_jacobi+1],lw=2,label='Jacobi')
plt.xlabel('Iterations', fontdict = font)
plt.ylabel('error', fontdict = font)
plt.xticks(fontsize = 16)
plt.yticks(fontsize = 16)
plt.legend(loc=3, bbox_to_anchor=[0, 1],
ncol=3, shadow=True, fancybox=True)
plt.show()
print(np.max(np.abs(phi-du)))
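# Gauss-Seidel sketch (the lecture description mentions this method but no cell is given for it;
# this is one possible implementation, reusing A, b, Nitermax and error_threshold from above).
phi_gs = np.zeros(Nx)
L = np.tril(A)  # lower triangle of A, including the diagonal
U = A - L       # strict upper triangle of A
error_gs = np.inf*np.ones(Nitermax)
for it in range(Nitermax):
    phi_old = np.copy(phi_gs)
    phi_gs = np.linalg.solve(L, b - np.dot(U, phi_old))
    error_gs[it] = np.max(np.abs(phi_gs - phi_old))
    if (error_gs[it] < error_threshold): break
it_gs = it
plt.semilogy(error_jacobi[0:it_jacobi+1], lw=2, label='Jacobi')
plt.semilogy(error_gs[0:it_gs+1], lw=2, label='Gauss-Seidel')
plt.legend(loc=3, bbox_to_anchor=[0, 1], ncol=3, shadow=True, fancybox=True)
plt.show()
print(np.max(np.abs(phi_gs-du)))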
!ipython nbconvert --to html ME249-Lecture-4-YOURNAME.ipynb
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lecture 4
Step2: <p class='alert alert-success'>
Step3: <h2>Compact Finite Difference Schemes</h2>
Step4: This is the Matrix approach
Step5: <h4>Gauss-Seidel Method</h4>
|
13,269
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncc', 'noresm2-lm', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
13,270
|
<ASSISTANT_TASK:>
Python Code:
%%capture
# install histogrammar (if not installed yet)
import sys
!"{sys.executable}" -m pip install histogrammar
import histogrammar as hg
import pandas as pd
import numpy as np
import matplotlib
# open a pandas dataframe for use below
from histogrammar import resources
df = pd.read_csv(resources.data("test.csv.gz"), parse_dates=["date"])
df.head(2)
# this creates a histogram with 100 even-sized bins in the (closed) range [-5, 5]
hist1 = hg.Bin(num=10, low=0, high=100)
hist1.fill.numpy(df['age'].values)
hist1.plot.matplotlib();
hist2 = hg.SparselyBin(binWidth=10, origin=0)
hist2.fill.numpy(df['age'].values)
hist2.plot.matplotlib();
hist1
hist2
histx = hg.Categorize('eyeColor')
histx.fill.numpy(df)
hists = df.hg_make_histograms(features=['isActive'])
# hist1 = hg.Categorize(quantity='isActive')
# hist2 = hg.Categorize(quantity='gender', value=hist1)
# hist3 = hg.Categorize(quantity='favoriteFruit')
hist1 = hg.Average(quantity='latitude')
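# A minimal sketch (same df): aggregators can be nested to build
# multi-dimensional histograms, as the commented lines above hint at
hist_nd = hg.Categorize(quantity='gender', value=hg.Categorize(quantity='isActive'))
hist_nd.fill.numpy(df)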
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Dataset
Step2: Comparing histogram types
Step3: Q
Step4: Q
Step5: Q
Step6: Multi-dimensional histograms
Step7: Q
|
13,271
|
<ASSISTANT_TASK:>
Python Code:
retorno = api.update_with_media(filename='fia.jpg',status='Test. Upload media via python')
print(retorno.text)
print(retorno.id)
print(retorno.created_at)
print(retorno.lang)
print(retorno.text)
print(retorno.user.screen_name)
print(retorno.user.friends_count)
print(retorno.user.time_zone)
retornoDestroy = api.destroy_status(retorno.id)
home = api.home_timeline(count=10)
for i, tweet in enumerate(home):
print(tweet.user.screen_name)
print(tweet.text)
print(tweet.user.id)
print('\n')
for i, user in enumerate(home):
help(api.user_timeline)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercício 2 - Salve o retorno do tweet do exercício anterior e imprima as seguintes informações
Step2: Exercício 3 - Utilizando o método home_timeline(), recupere os 10 tweets atuais. Para cada um desses tweets, imprima
Step3: Exercício 4 - Para cada tweet do exercício anterior, utilize o id do usuário e imprima o texto dos 5 primeiros tweets de cada um dos 10 usuários (user_timeline).
|
13,272
|
<ASSISTANT_TASK:>
Python Code:
from scipy import stats
import random
import numpy as np
def poisson_simul(rate, T):
time = random.expovariate(rate)
times = [0]
while (times[-1] < T):
times.append(time+times[-1])
time = random.expovariate(rate)
return times[1:]
rate = 1.0
T = 100.0
times = poisson_simul(rate, T)
result = stats.kstest(times, stats.uniform(loc=0, scale=T).cdf)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
13,273
|
<ASSISTANT_TASK:>
Python Code:
import math
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
def sine(x):
return np.sin(2 * math.pi * x)
x = np.linspace(0., 1., num=256, endpoint=False)
plt.plot(x, sine(x))
import magma as m
m.set_mantle_target("ice40")
import mantle
from loam.boards.icestick import IceStick
N = 8
icestick = IceStick()
icestick.Clock.on()
for i in range(N):
icestick.J3[i].output().on()
main = icestick.main()
counter = mantle.Counter(32)
sawtooth = counter.O[8:8+8]
wavetable = 128 + 127 * sine(x)
wavetable = [int(x) for x in wavetable]
rom = mantle.Memory(height=256, width=16, rom=list(wavetable), readonly=True)
m.wire( rom(sawtooth)[0:8], main.J3 )
m.wire( 1, rom.RE )
m.EndCircuit()
m.compile('build/sin', main)
%%bash
cd build
cat sin.pcf
yosys -q -p 'synth_ice40 -top main -blif sin.blif' sin.v
arachne-pnr -q -d 1k -o sin.txt -p sin.pcf sin.blif
icepack sin.txt sin.bin
iceprog sin.bin
import csv
import magma as m
with open("data/sine-capture.csv") as sine_capture_csv:
csv_reader = csv.reader(sine_capture_csv)
next(csv_reader, None) # skip the headers
rows = [row for row in csv_reader]
timestamps = [float(row[0]) for row in rows]
values = [m.bitutils.seq2int(tuple(int(x) for x in row[1:])) for row in rows]
plt.plot(timestamps[:250], values[:250], "b.")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To implement our sine wave generator, we'll use a counter to index into a ROM that is programmed to output the value of discrete points in the sine wave.
Step2: Compile and test.
Step3: We can wire up the GPIO pins to a logic analyzer to verify that our circuit produces the correct sine waveform.
Step4: TODO
|
13,274
|
<ASSISTANT_TASK:>
Python Code:
import time
print('Last updated: %s' %time.strftime('%d/%m/%Y'))
import platform
import multiprocessing
def print_sysinfo():
print('\nPython version :', platform.python_version())
print('compiler :', platform.python_compiler())
print('\nsystem :', platform.system())
print('release :', platform.release())
print('machine :', platform.machine())
print('processor :', platform.processor())
print('CPU count :', multiprocessing.cpu_count())
print('interpreter :', platform.architecture()[0])
print('\n\n')
print_sysinfo()
def python_bubblesort(a_list):
    """Bubblesort in Python for list objects (sorts in place)."""
length = len(a_list)
for i in range(length):
for j in range(1, length):
if a_list[j] < a_list[j-1]:
a_list[j-1], a_list[j] = a_list[j], a_list[j-1]
return a_list
def python_bubblesort_improved(a_list):
    """Bubblesort in Python for list objects (sorts in place)."""
length = len(a_list)
swapped = 1
for i in range(length):
if swapped:
swapped = 0
for ele in range(length-i-1):
if a_list[ele] > a_list[ele + 1]:
temp = a_list[ele + 1]
a_list[ele + 1] = a_list[ele]
a_list[ele] = temp
swapped = 1
return a_list
import random
import copy
random.seed(4354353)
l = [random.randint(1,1000) for num in range(1, 1000)]
l_sorted = sorted(l)
for f in [python_bubblesort, python_bubblesort_improved]:
assert(l_sorted == f(copy.copy(l)))
print('Bubblesort works correctly')
# small list
l_small = [random.randint(1,100) for num in range(1, 100)]
l_small_cp = copy.copy(l_small)
%timeit python_bubblesort(l_small)
%timeit python_bubblesort_improved(l_small_cp)
# larger list
l_small = [random.randint(1,10000) for num in range(1, 10000)]
l_small_cp = copy.copy(l_small)
%timeit python_bubblesort(l_small)
%timeit python_bubblesort_improved(l_small_cp)
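# For reference (sketch): the built-in Timsort sorts the same list orders of
# magnitude faster than either bubblesort variant
%timeit sorted(l_small)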
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sorting Algorithms
Step2: Bubble sort
Step4: Bubble sort implemented in (C)Python
Step6: <br>
Step7: Verifying that all implementations work correctly
Step8: Performance comparison
|
13,275
|
<ASSISTANT_TASK:>
Python Code:
col.find_one({'name': 'Alessandro'}) #find first value equality
list(col.find({'name': 'Alessandro'})) #find all value equality
cursor = col.find({'name': 'Alessandro'}) #can also use it as a generator
cursor.next()
col.find_one({'name': 'Alessandro'}, {'phone': True}) #this is a projection
col.remove({'name': 'Alessandro'})
col.insert_one({'name': 'Alessandro', 'phone': '1'})
col.insert_one({'name': 'Alessandro', 'phone': '2'})
col.insert_one({'name': 'Alessandro', 'phone': '3'})
list(col.find({'name': 'Alessandro'}))
# above: removed all 'Alessandro' entries and added 3 new ones with different phone numbers
col.find_one({'_id': {'$gt': bson.ObjectId('575fd53c9d1fa214962baae1')}}) #retrieve first entries with number higher than ObjectID of the first entry
list(col.find({'name': {'$regex': '^O'}}))
col.update({'name': 'Alessandro'}, {'name': 'Giovanni'}) # replaces ONE matching document entirely with {'name': 'Giovanni'}
col.update({'name': 'Alessandro'}, {'name': 'Giovanni'}) # without multi=True this still updates only one document
col.find_one({'name': 'Giovanni'})
col.find({'name': 'Alessandro'}).count()
col.update({'name': 'Giovanni'}, {'$set': {'phone': '+23244322'}}) # $set updates one field instead of replacing the whole document
col.find_one({'name': 'Giovanni'})
doc = col.find_one({'name': 'Giovanni'})
doc['phone'] = 5
col.save(doc)
print doc
col.update({'name': 'Alessandro'}, {'$set': {'name': 'Someone'}}, multi= True)
list(col.find({'name': 'Someone'}))
post = {
'title': 'First post',
'author':{
'name': 'Alessandro',
'surname': 'Molina',
'avatar': 'https://blog.madmimi.com/wp-content/uploads/2014/06/gary_gravatar.png'
},
'tags': ['mongodb', 'web', 'scaling']
}
db.blog.insert(post)
db.blog.find_one({'tags': 'mongodb'})
db.blog.find_one({'author.name': 'Alessandro'}) #query on subdocs
class MyClass():
def __init__(self, x, y):
self.x = x
self.y = y
c = MyClass(1,2)
print c.__dict__
db.blog.insert(c.__dict__)
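# Sketch: the plain-object document inserted above can be queried back
# like any other document
db.blog.find_one({'x': 1})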
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Updating the document replaces the whole original document
Step2: Here's how to update all doc
|
13,276
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import sklearn
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn import metrics
import matplotlib.pyplot as plt
import scipy
import math
import tensorflow as tf
import skflow
train = pd.read_csv('../data/isnice_text_train.csv')
X_train, y_train = train.ix[:,2], train.ix[:,1]
test = pd.read_csv('../data/isnice_text_test.csv')
X_test, y_test = test.ix[:,2], test.ix[:,1]
text_reg = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('reg', RandomForestRegressor()),
])
text_reg = text_reg.fit(X_train, y_train)
predicted = text_reg.predict(X_test)
MSE = metrics.regression.mean_squared_error(y_test, predicted)
print 'MSE: {0:f}'.format(MSE)
R2 = metrics.r2_score(y_test, predicted)
print 'R-squared: {0:f}'.format(R2)
plt.scatter(y_test, predicted)
plt.xlabel('Actual Score')
plt.ylabel('Predicted Score')
plt.title('Actual vs Predicted Score')
train = pd.read_csv('../data/isnice_text_train.csv')
X_train, y_train = train.ix[:,2], train.ix[:,3]
test = pd.read_csv('../data/isnice_text_test.csv')
X_test, y_test = test.ix[:,2], test.ix[:,3]
text_clf = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer(sublinear_tf=True,norm='l2')),
('clf', LogisticRegression()),
])
text_clf = text_clf.fit(X_train, y_train)
predicted = text_clf.predict(X_test)
accur = np.mean(predicted == y_test)
print 'Accuracy: {0:f}'.format(accur)
from sklearn.grid_search import GridSearchCV
parameters = {'vect__ngram_range': [(1, 1), (1, 2), (1,4)],
'tfidf__use_idf': (True, False),
'clf__C': [0.1, 1, 5, 10],
}
gs_clf = GridSearchCV(text_clf, parameters, n_jobs=-1)
gs_clf = gs_clf.fit(X_train, y_train)
best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])
print "Best Parameters"
for param_name in sorted(parameters.keys()):
print("%s: %r" % (param_name, best_parameters[param_name]))
predicted = gs_clf.predict(X_test)
accur = np.mean(predicted == y_test)
print ""
print 'Accuracy: {0:f}'.format(accur)
print ""
print "Classification Report"
print(metrics.classification_report(y_test, predicted))
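# Optional sketch: a confusion matrix gives a quick per-class error breakdown
print metrics.confusion_matrix(y_test, predicted)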
train = pd.read_csv('../data/isnice_text_train.csv')
X_train, y_train = train.ix[:,2], train.ix[:,3]
test = pd.read_csv('../data/isnice_text_test.csv')
X_test, y_test = test.ix[:,2], test.ix[:,3]
MAX_DOCUMENT_LENGTH = 100
vocab_processor = skflow.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
X_train = np.array(list(vocab_processor.fit_transform(X_train)))
X_test = np.array(list(vocab_processor.transform(X_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def cnn_model(X, y):
    """2 layer Convolutional network to predict from sequence of words
    to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = skflow.ops.categorical_variable(X, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = skflow.ops.conv2d(word_vectors, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convlution+Relu.
pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = skflow.ops.conv2d(pool1, N_FILTERS, FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
return skflow.models.logistic_regression(pool2, y)
classifier = skflow.TensorFlowEstimator(model_fn=cnn_model, n_classes=2,
steps=100, optimizer='Adam', learning_rate=0.005, continue_training=True)
for i in xrange(4):
classifier.fit(X_train, y_train, logdir='/tmp/tf_examples/word_cnn')
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preprocessing
Step2: Training the regressor
Step3: Results
Step4: Classification
Step5: Training the classifier
Step6: Results
Step7: Hyperparameter Search and Results
Step8: Convolutional Neural Net
Step9: Processing the vocabulary
Step11: Defining the model
Step12: Training and evaluating the model
|
13,277
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
%matplotlib inline
import random
import numpy as np
import pandas as pd
from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import sklearn.ensemble as ske
import tensorflow as tf
from tensorflow.contrib import learn as skflow
route_df = pd.read_excel('route.xls', index_col=None, na_values=['NA'])
route_df.head()
route_df['mapUsed'].mean()
route_df.groupby('country').mean()
country_metro_grouping = route_df.groupby(['country','metro']).mean()
country_metro_grouping
country_metro_grouping['mapUsed'].plot.bar()
rating_bins = pd.cut(route_df["rating"], np.arange(0, 6, 1)) # bins over the rating column (the old name group_by_age was misleading)
rating_grouping = route_df.groupby(rating_bins).mean()
rating_grouping['mapUsed'].plot.bar()
route_df.count()
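# Sketch (hypothetical, since this dataset is complete): missing values could
# be imputed or dropped, e.g.
# route_df = route_df.fillna(route_df.mean()) # or route_df.dropna()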
def preprocess_route_df(df):
processed_df = df.copy()
le = preprocessing.LabelEncoder()
processed_df.country = le.fit_transform(processed_df.country)
processed_df.oldRoute = le.fit_transform(processed_df.oldRoute)
processed_df.newRoute = le.fit_transform(processed_df.newRoute)
processed_df = processed_df.drop(['name','uid'],axis=1)
return processed_df
processed_df = preprocess_route_df(route_df)
processed_df
X = processed_df.drop(['mapUsed'], axis=1).values
y = processed_df['mapUsed'].values
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y,test_size=0.2)
clf_dt = tree.DecisionTreeClassifier(max_depth=10)
clf_dt.fit (X_train, y_train)
clf_dt.score (X_test, y_test)
shuffle_validator = cross_validation.ShuffleSplit(len(X), n_iter=20, test_size=0.2, random_state=0)
def test_classifier(clf):
scores = cross_validation.cross_val_score(clf, X, y, cv=shuffle_validator)
print("Accuracy: %0.4f (+/- %0.2f)" % (scores.mean(), scores.std()))
test_classifier(clf_dt)
clf_rf = ske.RandomForestClassifier(n_estimators=50)
test_classifier(clf_rf)
clf_gb = ske.GradientBoostingClassifier(n_estimators=50)
test_classifier(clf_gb)
eclf = ske.VotingClassifier([('dt', clf_dt), ('rf', clf_rf), ('gb', clf_gb)])
test_classifier(eclf)
#tf_clf_dnn = skflow.TensorFlowDNNClassifier(hidden_units=[20, 40, 20], n_classes=2, batch_size=256, steps=1000, learning_rate=0.05)
feature_columns = [tf.contrib.layers.real_valued_column("")]
tf_clf_dnn = skflow.DNNClassifier(feature_columns=feature_columns, hidden_units=[20, 40, 20], n_classes=2, model_dir="/tmp")
#tf_clf_dnn.evaluate(batch_size=256, steps=1000)
tf_clf_dnn.fit(X_train, y_train, steps=1000)
accuracy_score = tf_clf_dnn.evaluate(X_test, y_test,steps=1000)["accuracy"]
print("\nTest Accuracy: {0:f}\n".format(accuracy_score))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's look at the data
Step2: What percentage of the drivers are using the map?
Step3: 47% of the drivers are following the map.
Step4: Approximately 45-48% of the drivers are using the map in each country, and approximately half of the entries are made in metro cities. Let's plot these values to get a better understanding of the data.
Step5: 1 signifies that it is a metro
Step6: Most of the drivers' ratings fall in the 1-2 range
Step7: There are no missing values. However, if there were missing values, we could deal with them in the following way
Step8: What we are basically doing here is processing the data to produce numeric labels for the string data
Step9: X contains all the values besides whether the map was used or not and y contains the answer
Step10: decision tree
Step11: neural Network
|
13,278
|
<ASSISTANT_TASK:>
Python Code:
bool('ok')
bool(8)
bool('')
num=input('Enter a number:')
if num>0:
print 'positive'
elif num<0:
print 'negative'
else:
print 'zero'
x=1
while x<=3:
print x
x+=1
nums=[1,2,3]
for n in nums:
print n
range(0,10)
range(10)
range(10,0,-2) # -2 is the step size
d ={'x':1,'y':2,'z':3}
for key in d:
print key,'corresponds to', d[key]
for key, value in d.items():
print key,'corresponds to', value
sorted([4,3,6,8])
reversed([4,3,6,8])
list(reversed([4,3,6,8]))
names=['anne','beth','george','damon']
ages=[12,45,32,102]
for i in range(len(names)):
print names[i],'is',ages[i],'years old'
zip(names,ages)
for name, age in zip(names,ages):
print name,'is',age,'years old'
[x*x for x in range(10)]
[x*x for x in range(10) if x % 3 ==0]
[(x,y) for x in range(2) for y in range(3)]
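# Sketch: 'break' exits a loop early, e.g. stop at the first square above 50
for n in range(10):
    if n*n > 50:
        print n*n
        break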
pass # this statement does nothing
x=1
del x
x
exec 'print "Hello, world"'
from math import sqrt
exec 'sqrt=1'
sqrt(4)
from math import sqrt
scope={}
exec 'sqrt=1' in scope
sqrt(4)
scope['sqrt']
eval('6+18*2') # evaluates an expression
exec('x=6+18*2') # executes a statement
x
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2.2 Conditional execution: the if statement, else clauses and elif clauses
Step2: 2.3 More complex conditions
Step3: 3.2 The for loop
Step4: Because iterating (another word for looping) over a range of numbers is very common, there is a built-in range function for it:
Step5: The range function works much like slicing: it includes the lower bound but excludes the upper bound. If you want the lower bound to be 0, you can supply just the upper bound:
Step6: 3.3 Iterating over dictionary elements
Step7: If you only need the values, you can use d.values instead of d.keys. The d.items method returns key-value pairs as tuples; one big advantage of the for loop is that you can use sequence unpacking in it:
Step8: 3.4 Reversed and sorted iteration
Step9: Note: although the sorted function returns a list, the reversed function returns an iterable object. Both can be used in for loops and with the join method without any problem, but you cannot index or slice the reversed result or call list methods on it directly.
Step10: The built-in zip function can be used for parallel iteration: it "zips" two sequences together and returns a list of tuples:
Step11: Now the tuples can be unpacked inside the loop:
Step12: 3.6 Breaking out of a loop
Step13: What if you only want to print the squares that are divisible by 3? You can use the modulo operator: x % 3.
Step14: 5 The pass, del and exec statements
Step15: The del statement removes not only the reference to an object, but also the name itself.
Step16: The most useful aspect of the exec statement is that it can build code strings dynamically. If the string comes from somewhere else, quite possibly from the user, you can hardly be sure what code it contains, so for safety you can add a dictionary to act as a namespace.
Step17: This is done by adding "in scope", where scope is the dictionary that serves as the namespace for the code string. As you can see, the potentially destructive code does not overwrite the sqrt function: the original function works normally, and the sqrt variable assigned via exec is only valid within its own scope.
|
13,279
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import numpy.matlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
%matplotlib inline
import math
import random
import time
import os
import pickle
import tensorflow as tf #built with TensorFlow version 0.9
# in the real project class, we use argparse (https://docs.python.org/3/library/argparse.html)
class FakeArgParse():
def __init__(self):
pass
args = FakeArgParse()
#general model params
args.train = False
args.rnn_size = 100 # hidden units per LSTM layer
args.tsteps = 256 if args.train else 1
args.batch_size = 32 if args.train else 1
args.nmixtures = 8 # number of Gaussian mixtures in MDN
#window params
args.kmixtures = 1 # number of Gaussian mixtures in attention mechanism (for soft convolution window)
args.alphabet = ' abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' #later we'll add an <UNK> slot for unknown chars
args.tsteps_per_ascii = 25 # an approximate estimate
#book-keeping
args.save_path = './saved/model.ckpt'
args.data_dir = './data'
args.log_dir = './logs/'
args.text = 'call me ishmael some years ago'
args.style = -1 # don't use a custom style
args.bias = 1.0
args.eos_prob = 0.4 # threshold probability for ending a stroke
# in real life the model is a class. I used this hack to make the iPython notebook more readable
class FakeModel():
def __init__(self):
pass
model = FakeModel()
model.char_vec_len = len(args.alphabet) + 1 #plus one for <UNK> token
model.ascii_steps = len(args.text)
model.graves_initializer = tf.truncated_normal_initializer(mean=0., stddev=.075, seed=None, dtype=tf.float32)
model.window_b_initializer = tf.truncated_normal_initializer(mean=-3.0, stddev=.25, seed=None, dtype=tf.float32)
# ----- build the basic recurrent network architecture
cell_func = tf.contrib.rnn.LSTMCell # could be GRUCell or RNNCell
model.cell0 = cell_func(args.rnn_size, state_is_tuple=True, initializer=model.graves_initializer)
model.cell1 = cell_func(args.rnn_size, state_is_tuple=True, initializer=model.graves_initializer)
model.cell2 = cell_func(args.rnn_size, state_is_tuple=True, initializer=model.graves_initializer)
model.input_data = tf.placeholder(dtype=tf.float32, shape=[None, args.tsteps, 3])
model.target_data = tf.placeholder(dtype=tf.float32, shape=[None, args.tsteps, 3])
model.istate_cell0 = model.cell0.zero_state(batch_size=args.batch_size, dtype=tf.float32)
model.istate_cell1 = model.cell1.zero_state(batch_size=args.batch_size, dtype=tf.float32)
model.istate_cell2 = model.cell2.zero_state(batch_size=args.batch_size, dtype=tf.float32)
#slice the input volume into separate vols for each tstep
inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(model.input_data, args.tsteps, 1)]
#build model.cell0 computational graph
outs_cell0, model.fstate_cell0 = tf.contrib.legacy_seq2seq.rnn_decoder(inputs, model.istate_cell0, \
model.cell0, loop_function=None, scope='cell0')
# ----- build the gaussian character window
def get_window(alpha, beta, kappa, c):
# phi -> [? x 1 x ascii_steps] and is a tf matrix
# c -> [? x ascii_steps x alphabet] and is a tf matrix
ascii_steps = c.get_shape()[1].value #number of items in sequence
phi = get_phi(ascii_steps, alpha, beta, kappa)
window = tf.matmul(phi,c)
window = tf.squeeze(window, [1]) # window ~ [?,alphabet]
return window, phi
#get phi for all t,u (returns a [1 x tsteps] matrix) that defines the window
def get_phi(ascii_steps, alpha, beta, kappa):
# alpha, beta, kappa -> [?,kmixtures,1] and each is a tf variable
u = np.linspace(0,ascii_steps-1,ascii_steps) # weight all the U items in the sequence
kappa_term = tf.square( tf.subtract(kappa,u))
exp_term = tf.multiply(-beta,kappa_term)
phi_k = tf.multiply(alpha, tf.exp(exp_term))
phi = tf.reduce_sum(phi_k,1, keep_dims=True)
return phi # phi ~ [?,1,ascii_steps]
def get_window_params(i, out_cell0, kmixtures, prev_kappa, reuse=True):
hidden = out_cell0.get_shape()[1]
n_out = 3*kmixtures
with tf.variable_scope('window',reuse=reuse):
window_w = tf.get_variable("window_w", [hidden, n_out], initializer=model.graves_initializer)
window_b = tf.get_variable("window_b", [n_out], initializer=model.window_b_initializer)
abk_hats = tf.nn.xw_plus_b(out_cell0, window_w, window_b) # abk_hats ~ [?,n_out] = "alpha, beta, kappa hats"
abk = tf.exp(tf.reshape(abk_hats, [-1, 3*kmixtures,1]))
alpha, beta, kappa = tf.split(abk, 3, 1) # alpha_hat, etc ~ [?,kmixtures]
kappa = kappa + prev_kappa
return alpha, beta, kappa # each ~ [?,kmixtures,1]
model.init_kappa = tf.placeholder(dtype=tf.float32, shape=[None, args.kmixtures, 1])
model.char_seq = tf.placeholder(dtype=tf.float32, shape=[None, model.ascii_steps, model.char_vec_len])
wavg_prev_kappa = model.init_kappa
prev_window = model.char_seq[:,0,:]
#add gaussian window result
reuse = False
for i in range(len(outs_cell0)):
[alpha, beta, new_kappa] = get_window_params(i, outs_cell0[i], args.kmixtures, wavg_prev_kappa, reuse=reuse)
window, phi = get_window(alpha, beta, new_kappa, model.char_seq)
outs_cell0[i] = tf.concat((outs_cell0[i],window), 1) #concat outputs
outs_cell0[i] = tf.concat((outs_cell0[i],inputs[i]), 1) #concat input data
# prev_kappa = new_kappa #tf.ones_like(new_kappa, dtype=tf.float32, name="prev_kappa_ones") #
wavg_prev_kappa = tf.reduce_mean( new_kappa, reduction_indices=1, keep_dims=True) # mean along kmixtures dimension
reuse = True
model.window = window #save the last window (for generation)
model.phi = phi #save the last window (for generation)
model.new_kappa = new_kappa #save the last window (for generation)
model.alpha = alpha #save the last window (for generation)
model.wavg_prev_kappa = wavg_prev_kappa
# ----- finish building second recurrent cell
outs_cell1, model.fstate_cell1 = tf.contrib.legacy_seq2seq.rnn_decoder(outs_cell0, model.istate_cell1, model.cell1, \
loop_function=None, scope='cell1') #use scope from training
# ----- finish building third recurrent cell
outs_cell2, model.fstate_cell2 = tf.contrib.legacy_seq2seq.rnn_decoder(outs_cell1, model.istate_cell2, model.cell2, \
loop_function=None, scope='cell2')
out_cell2 = tf.reshape(tf.concat(outs_cell2, 1), [-1, args.rnn_size]) #concat outputs for efficiency
#put a dense cap on top of the rnn cells (to interface with the mixture density network)
n_out = 1 + args.nmixtures * 6 # params = end_of_stroke + 6 parameters per Gaussian
with tf.variable_scope('mdn_dense'):
output_w = tf.get_variable("output_w", [args.rnn_size, n_out], initializer=model.graves_initializer)
output_b = tf.get_variable("output_b", [n_out], initializer=model.graves_initializer)
output = tf.nn.xw_plus_b(out_cell2, output_w, output_b) #data flows through dense nn
# ----- build mixture density cap on top of second recurrent cell
def gaussian2d(x1, x2, mu1, mu2, s1, s2, rho):
# define gaussian mdn (eq 24, 25 from http://arxiv.org/abs/1308.0850)
x_mu1 = tf.subtract(x1, mu1)
x_mu2 = tf.subtract(x2, mu2)
Z = tf.square(tf.div(x_mu1, s1)) + \
tf.square(tf.div(x_mu2, s2)) - \
2*tf.div(tf.multiply(rho, tf.multiply(x_mu1, x_mu2)), tf.multiply(s1, s2))
rho_square_term = 1-tf.square(rho)
power_e = tf.exp(tf.div(-Z,2*rho_square_term))
regularize_term = 2*np.pi*tf.multiply(tf.multiply(s1, s2), tf.sqrt(rho_square_term))
gaussian = tf.div(power_e, regularize_term)
return gaussian
# now transform dense NN outputs into params for MDN
def get_mdn_coef(Z):
# returns the tf slices containing mdn dist params (eq 18...23 of http://arxiv.org/abs/1308.0850)
eos_hat = Z[:, 0:1] #end of sentence tokens
pi_hat, mu1_hat, mu2_hat, sigma1_hat, sigma2_hat, rho_hat = tf.split(Z[:, 1:], 6, 1)
model.pi_hat, model.sigma1_hat, model.sigma2_hat = \
pi_hat, sigma1_hat, sigma2_hat # these are useful for biasing
eos = tf.sigmoid(-1*eos_hat) # technically we gained a negative sign
pi = tf.nn.softmax(pi_hat) # softmax z_pi:
mu1 = mu1_hat; mu2 = mu2_hat # leave mu1, mu2 as they are
sigma1 = tf.exp(sigma1_hat); sigma2 = tf.exp(sigma2_hat) # exp for sigmas
rho = tf.tanh(rho_hat) # tanh for rho (squish between -1 and 1)
return [eos, pi, mu1, mu2, sigma1, sigma2, rho]
# reshape target data (as we did the input data)
flat_target_data = tf.reshape(model.target_data,[-1, 3])
[x1_data, x2_data, eos_data] = tf.split(flat_target_data, 3, 1) #we might as well split these now
[model.eos, model.pi, model.mu1, model.mu2, model.sigma1, model.sigma2, model.rho] = get_mdn_coef(output)
model.sess = tf.InteractiveSession()
model.saver = tf.train.Saver(tf.global_variables())
model.sess.run(tf.global_variables_initializer())
load_was_success = True # yes, I'm being optimistic
global_step = 0
try:
save_dir = '/'.join(args.save_path.split('/')[:-1])
ckpt = tf.train.get_checkpoint_state(save_dir)
load_path = ckpt.model_checkpoint_path
model.saver.restore(model.sess, load_path)
except:
print("no saved model to load. starting new session")
load_was_success = False
else:
print("loaded model: {}".format(load_path))
model.saver = tf.train.Saver(tf.global_variables())
global_step = int(load_path.split('-')[-1])
# utility function for converting input ascii characters into vectors the network can understand.
# index position 0 means "unknown"
def to_one_hot(s, ascii_steps, alphabet):
    steplimit = 3000; s = s[:steplimit] if len(s) > steplimit else s # clip super-long strings
seq = [alphabet.find(char) + 1 for char in s]
if len(seq) >= ascii_steps:
seq = seq[:ascii_steps]
else:
seq = seq + [0]*(ascii_steps - len(seq))
one_hot = np.zeros((ascii_steps,len(alphabet)+1))
one_hot[np.arange(ascii_steps),seq] = 1
return one_hot
def get_style_states(model, args):
with open(os.path.join(args.data_dir, 'styles.p'),'rb') as f:
style_strokes, style_strings = pickle.load(f, encoding='latin1')
style_strokes, style_string = style_strokes[args.style], style_strings[args.style]
style_onehot = [to_one_hot(style_string, model.ascii_steps, args.alphabet)]
c0, c1, c2 = model.istate_cell0.c.eval(), model.istate_cell1.c.eval(), model.istate_cell2.c.eval()
h0, h1, h2 = model.istate_cell0.h.eval(), model.istate_cell1.h.eval(), model.istate_cell2.h.eval()
    if args.style == -1: return [c0, c1, c2, h0, h1, h2] #model 'chooses' random style
style_stroke = np.zeros((1, 1, 3), dtype=np.float32)
style_kappa = np.zeros((1, args.kmixtures, 1))
prime_len = 500 # must be <= 700
    for i in range(prime_len):
style_stroke[0][0] = style_strokes[i,:]
feed = {model.input_data: style_stroke, model.char_seq: style_onehot, model.init_kappa: style_kappa, \
model.istate_cell0.c: c0, model.istate_cell1.c: c1, model.istate_cell2.c: c2, \
model.istate_cell0.h: h0, model.istate_cell1.h: h1, model.istate_cell2.h: h2}
fetch = [model.wavg_prev_kappa, \
model.fstate_cell0.c, model.fstate_cell1.c, model.fstate_cell2.c,
model.fstate_cell0.h, model.fstate_cell1.h, model.fstate_cell2.h]
[style_kappa, c0, c1, c2, h0, h1, h2] = model.sess.run(fetch, feed)
return [c0, c1, c2, np.zeros_like(h0), np.zeros_like(h1), np.zeros_like(h2)] #only the c vectors should be primed
# initialize some sampling parameters
one_hot = [to_one_hot(args.text, model.ascii_steps, args.alphabet)] # convert input string to one-hot vector
[c0, c1, c2, h0, h1, h2] = get_style_states(model, args) # get numpy zeros states for all three LSTMs
kappa = np.zeros((1, args.kmixtures, 1)) # attention's read head starts at index 0
prev_x = np.asarray([[[0, 0, 1]]], dtype=np.float32) # start with a pen stroke at (0,0)
strokes, pis, windows, phis, kappas = [], [], [], [], [] # the data we're going to generate will go here
def sample_gaussian2d(mu1, mu2, s1, s2, rho):
mean = [mu1, mu2]
cov = [[s1*s1, rho*s1*s2], [rho*s1*s2, s2*s2]]
x = np.random.multivariate_normal(mean, cov, 1)
return x[0][0], x[0][1]
finished = False ; i = 0
while not finished and i < 800:
feed = {model.input_data: prev_x, model.char_seq: one_hot, model.init_kappa: kappa, \
model.istate_cell0.c: c0, model.istate_cell1.c: c1, model.istate_cell2.c: c2, \
model.istate_cell0.h: h0, model.istate_cell1.h: h1, model.istate_cell2.h: h2}
fetch = [model.pi_hat, model.mu1, model.mu2, model.sigma1_hat, model.sigma2_hat, model.rho, model.eos, \
model.window, model.phi, model.new_kappa, model.wavg_prev_kappa, model.alpha, \
model.fstate_cell0.c, model.fstate_cell1.c, model.fstate_cell2.c,\
model.fstate_cell0.h, model.fstate_cell1.h, model.fstate_cell2.h]
[pi_hat, mu1, mu2, sigma1_hat, sigma2_hat, rho, eos, window, phi, kappa, wavg_kappa, alpha, \
c0, c1, c2, h0, h1, h2] = model.sess.run(fetch, feed)
#bias stuff:
sigma1 = np.exp(sigma1_hat - args.bias)
sigma2 = np.exp(sigma2_hat - args.bias)
pi_hat *= 1 + args.bias # apply bias
pi = np.zeros_like(pi_hat) # need to preallocate
pi[0] = np.exp(pi_hat[0]) / np.sum(np.exp(pi_hat[0]), axis=0) # softmax
# choose a component from the MDN
idx = np.random.choice(pi.shape[1], p=pi[0])
eos = 1 if args.eos_prob < eos[0][0] else 0 # use 0.5 as arbitrary boundary
x1, x2 = sample_gaussian2d(mu1[0][idx], mu2[0][idx], sigma1[0][idx], sigma2[0][idx], rho[0][idx])
# store the info at this time step
windows.append(window)
phis.append(phi[0])
kappas.append(kappa[0])
pis.append(pi[0])
strokes.append([mu1[0][idx], mu2[0][idx], sigma1[0][idx], sigma2[0][idx], rho[0][idx], eos])
# test if finished (has the read head seen the whole ascii sequence?)
main_kappa_idx = np.where(alpha[0]==np.max(alpha[0])); # choose the read head with the highes alpha value
finished = True if kappa[0][main_kappa_idx] > len(args.text) + 1 else False
# new input is previous output
prev_x[0][0] = np.array([x1, x2, eos], dtype=np.float32)
kappa = wavg_kappa
i+=1
windows = np.vstack(windows)
phis = np.vstack(phis)
kappas = np.vstack(kappas)
pis = np.vstack(pis)
strokes = np.vstack(strokes)
# the network predicts the displacements between pen points, so do a running sum over the time dimension
strokes[:,:2] = np.cumsum(strokes[:,:2], axis=0)
# plots parameters from the attention mechanism
def window_plots(phis, windows):
plt.figure(figsize=(16,4))
plt.subplot(121)
plt.title('Phis', fontsize=20)
plt.xlabel("ascii #", fontsize=15)
plt.ylabel("time steps", fontsize=15)
plt.imshow(phis, interpolation='nearest', aspect='auto', cmap=cm.jet)
plt.subplot(122)
plt.title('Soft attention window', fontsize=20)
plt.xlabel("one-hot vector", fontsize=15)
plt.ylabel("time steps", fontsize=15)
plt.imshow(windows, interpolation='nearest', aspect='auto', cmap=cm.jet)
window_plots(phis, windows)
plt.figure(figsize=(8,4))
plt.title("How MDN $\pi$ values change over time", fontsize=15)
plt.xlabel("$\pi$ values", fontsize=15)
plt.ylabel("time step", fontsize=15)
plt.imshow(pis, interpolation='nearest', aspect='auto', cmap=cm.jet)
def gauss_plot(strokes, title, figsize = (20,2)):
plt.figure(figsize=figsize)
import matplotlib.mlab as mlab
buff = 1 ; epsilon = 1e-4
minx, maxx = np.min(strokes[:,0])-buff, np.max(strokes[:,0])+buff
miny, maxy = np.min(strokes[:,1])-buff, np.max(strokes[:,1])+buff
delta = abs(maxx-minx)/400. ;
x = np.arange(minx, maxx, delta)
y = np.arange(miny, maxy, delta)
X, Y = np.meshgrid(x, y)
Z = np.zeros_like(X)
for i in range(strokes.shape[0]):
gauss = mlab.bivariate_normal(X, Y, mux=strokes[i,0], muy=strokes[i,1], \
sigmax=strokes[i,2], sigmay=strokes[i,3], sigmaxy=0) # sigmaxy=strokes[i,4] gives error
Z += gauss/(np.max(gauss) + epsilon)
plt.title(title, fontsize=20)
plt.imshow(Z)
gauss_plot(strokes, "Stroke probability", figsize = (2*model.ascii_steps,4))
# plots the stroke data (handwriting!)
def line_plot(strokes, title, figsize = (20,2)):
plt.figure(figsize=figsize)
eos_preds = np.where(strokes[:,-1] == 1)
eos_preds = [0] + list(eos_preds[0]) + [-1] #add start and end indices
for i in range(len(eos_preds)-1):
start = eos_preds[i]+1
stop = eos_preds[i+1]
plt.plot(strokes[start:stop,0], strokes[start:stop,1],'b-', linewidth=2.0) #draw a stroke
plt.title(title, fontsize=20)
plt.gca().invert_yaxis()
plt.show()
line_plot(strokes, 'Line plot: "{}"'.format(args.text), figsize=(model.ascii_steps,2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define hyperparameters
Step2: Model overview
Step3: Initialize LSTMs and build LSTM 1
Step4: In the cell above we use the TensorFlow seq2seq API to create three LSTM cells. Learn about how LSTMs work here. When we unroll the model in time, it will look like this (arrows denote flow of data)
Step5: Alex Graves gives a really nice intuition for the roles of $\alpha$, $\beta$, and $\kappa$ in his paper
Step6: Build LSTMs 2 and 3
Step7: Build Mixture Density Network (MDN)
Step8: Next, define a 2D Gaussian using tensor operations
Step9: Finally, transform the NN outputs into parameters for a mixture of Gaussians
Step10: Now we just map the outputs from the dense NN to the Gaussian mixture parameters.
Step11: Side note
Step12: Generate handwriting!
Step13: Prime the model
Step14: Prepare feed parameters for the model
Step15: Sample from the model
Step16: Plots
Step17: Phis
Step18: Mixture Density Network $\pi$ probabilities
Step19: Stroke probability
|
13,280
|
<ASSISTANT_TASK:>
Python Code:
import yahoo_finance
import requests
import datetime
def print_unix_timestamp_date(timestamp):
print(
datetime.datetime.fromtimestamp(
int(timestamp)
).strftime('%Y-%m-%d %H:%M:%S')
)
print_unix_timestamp_date("1420077600")
print_unix_timestamp_date("1496113200")
EXAMPLE_QUERY = "https://query1.finance.yahoo.com/v7/finance/download/AMZN?period1=1483585200&period2=1496113200&interval=1d&events=history&crumb=mFcCyf2I8jh"
import urllib2
response = urllib2.urlopen(EXAMPLE_QUERY)
html = response.read()
csv_values = requests.get(EXAMPLE_QUERY)
csv_values
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import sys
%matplotlib inline
%load_ext autoreload
%autoreload 2
pd.__version__
import pandas_datareader as pdr
pdr.__version__
from pandas_datareader import data, wb
SPY_CREATION_DATE = dt.datetime(1993,1,22)
start = SPY_CREATION_DATE
end = dt.datetime(1995,12,31)
#Let's try to get SPY
SPY_df = data.DataReader(name='SPY',data_source='google',start=start,
end=end)
print(SPY_df.shape)
SPY_df.head()
from yahoo_finance import Share
yahoo = Share('YHOO')
print(yahoo.get_price())
yahoo.get_historical('2005-01-01','2016-12-31')
import pandas_datareader.data as web
SPY_CREATION_DATE = dt.datetime(1993,1,22)
start = SPY_CREATION_DATE
end = dt.datetime(2016,12,31)
tickers = ['SPY','GOOG','AAPL','NVDA']
#Create the (empty) dataframe
dates = pd.date_range(start,end)
data_df = pd.DataFrame(index=dates)
#Let's try to get SPY
SPY_df = web.DataReader(name='SPY',data_source='google',start=start,
end=end)
print(SPY_df.shape)
SPY_df.head()
SPY_df['Close'].plot()
(SPY_df.index[-1]-SPY_df.index[0]).days / 365
data_df
# This will add the data of one ticker
def add_ticker(data,ticker_df,ticker_name):
for key in data.keys():
column_df = pd.DataFrame(ticker_df[key]).rename(columns={key:ticker_name})
data[key] = data[key].join(column_df, how='left')
return data
def add_tickers(data, tickers, source):
for name in tickers:
if(not (name in data['Open'].columns)):
ticker_df = web.DataReader(name=name,data_source=source,start=start,end=end)
            data = add_ticker(data,ticker_df,name)
print('Added: '+name)
else:
print(name+' was already added')
return data
iterables = [SPY_df.index, SPY_df.columns]
indexes = pd.MultiIndex.from_product(iterables, names=['date', 'feature'])
data_multi = pd.DataFrame(index=indexes)
print(data_multi.shape)
data_multi.head(20)
data_multi.xs('2001-02-08', level='date')
SPY_df.iloc[0]
SPY_df.head()
data_multi['sd'] = np.nan
data_multi.loc['2001-02-05','Open']['sd'] = SPY_df.loc['2001-02-05','Open']
data_multi
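# Sketch: chained indexing like the assignment above is fragile and often
# does not write through; a single .loc with a tuple key is the reliable form:
# data_multi.loc[('2001-02-05', 'Open'), 'sd'] = SPY_df.loc['2001-02-05', 'Open']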
SPY_df.reset_index(inplace=True)
SPY_df.head()
SPY_df.set_index(['Date','Open'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting the data
Step2: So, Google has a limit of 15 years of data on each query
Step3: Keep dictionary or use multiindex?
|
13,281
|
<ASSISTANT_TASK:>
Python Code:
import cv2
import numpy as np
img = cv2.imread('gradient.jpg',0)
_,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
_,th2 = cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV) #check every pixel with 127
cv2.imshow("img",img)
cv2.imshow("th1",th1)
cv2.imshow("th2",th2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
13,282
|
<ASSISTANT_TASK:>
Python Code:
! pip uninstall -y tensorflow
! pip install -q tensorflow-model-optimization
! pip install --upgrade tensorflow==2.6
import tempfile
import os
import tensorflow as tf
from tensorflow import keras
# Show the currently installed version of TensorFlow
print("TensorFlow version: ",tf.version.VERSION)
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define the model architecture.
# TODO
model = keras.Sequential([
keras.layers.InputLayer(input_shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10)
])
# Train the digit classification model
# TODO
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
epochs=1,
validation_split=0.1,
)
import tensorflow_model_optimization as tfmot
quantize_model = tfmot.quantization.keras.quantize_model
# q_aware stands for for quantization aware.
q_aware_model = quantize_model(model)
# `quantize_model` requires a recompile.
q_aware_model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
q_aware_model.summary()
train_images_subset = train_images[0:1000] # out of 60000
train_labels_subset = train_labels[0:1000]
q_aware_model.fit(train_images_subset, train_labels_subset,
batch_size=500, epochs=1, validation_split=0.1)
_, baseline_model_accuracy = model.evaluate(
test_images, test_labels, verbose=0)
_, q_aware_model_accuracy = q_aware_model.evaluate(
test_images, test_labels, verbose=0)
print('Baseline test accuracy:', baseline_model_accuracy)
print('Quant test accuracy:', q_aware_model_accuracy)
converter = tf.lite.TFLiteConverter.from_keras_model(q_aware_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
quantized_tflite_model = converter.convert()
import numpy as np
def evaluate_model(interpreter):
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
# Run predictions on every image in the "test" dataset.
prediction_digits = []
for i, test_image in enumerate(test_images):
if i % 1000 == 0:
print('Evaluated on {n} results so far.'.format(n=i))
# Pre-processing: add batch dimension and convert to float32 to match with
# the model's input data format.
# TODO
test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
interpreter.set_tensor(input_index, test_image)
# Run inference.
interpreter.invoke()
# Post-processing: remove batch dimension and find the digit with highest
# probability.
output = interpreter.tensor(output_index)
digit = np.argmax(output()[0])
prediction_digits.append(digit)
print('\n')
# Compare prediction results with ground truth labels to calculate accuracy.
prediction_digits = np.array(prediction_digits)
accuracy = (prediction_digits == test_labels).mean()
return accuracy
interpreter = tf.lite.Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
test_accuracy = evaluate_model(interpreter)
print('Quant TFLite test_accuracy:', test_accuracy)
print('Quant TF test accuracy:', q_aware_model_accuracy)
# Create float TFLite model.
# TODO
float_converter = tf.lite.TFLiteConverter.from_keras_model(model)
float_tflite_model = float_converter.convert()
# Measure sizes of models.
_, float_file = tempfile.mkstemp('.tflite')
_, quant_file = tempfile.mkstemp('.tflite')
with open(quant_file, 'wb') as f:
f.write(quantized_tflite_model)
with open(float_file, 'wb') as f:
f.write(float_tflite_model)
print("Float model in Mb:", os.path.getsize(float_file) / float(2**20))
print("Quantized model in Mb:", os.path.getsize(quant_file) / float(2**20))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This notebook uses TF2.x.
Step2: Train a model for MNIST without quantization aware training
Step3: Clone and fine-tune pre-trained model with quantization aware training
Step4: Train and evaluate the model against baseline
Step5: For this example, there is minimal to no loss in test accuracy after quantization aware training, compared to the baseline.
Step6: Create quantized model for TFLite backend
Step7: See persistence of accuracy from TF to TFLite
Step8: You evaluate the quantized model and see that the accuracy from TensorFlow persists to the TFLite backend.
Step9: See 4x smaller model from quantization
|
13,283
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
import pymc3 as pm
from pymc3.distributions.timeseries import GaussianRandomWalk
import seaborn as sns
from statsmodels import datasets
from theano import tensor as T
df = datasets.get_rdataset('mastectomy', 'HSAUR', cache=True).data
df.event = df.event.astype(np.int64)
df.metastized = (df.metastized == 'yes').astype(np.int64)
n_patients = df.shape[0]
patients = np.arange(n_patients)
df.head()
n_patients
df.event.mean()
fig, ax = plt.subplots(figsize=(8, 6))
blue, _, red = sns.color_palette()[:3]
ax.hlines(patients[df.event.values == 0], 0, df[df.event.values == 0].time,
color=blue, label='Censored')
ax.hlines(patients[df.event.values == 1], 0, df[df.event.values == 1].time,
color=red, label='Uncensored')
ax.scatter(df[df.metastized.values == 1].time, patients[df.metastized.values == 1],
color='k', zorder=10, label='Metastized')
ax.set_xlim(left=0)
ax.set_xlabel('Months since mastectomy')
ax.set_yticks([])
ax.set_ylabel('Subject')
ax.set_ylim(-0.25, n_patients + 0.25)
ax.legend(loc='center right');
interval_length = 3
interval_bounds = np.arange(0, df.time.max() + interval_length + 1, interval_length)
n_intervals = interval_bounds.size - 1
intervals = np.arange(n_intervals)
fig, ax = plt.subplots(figsize=(8, 6))
ax.hist(df[df.event == 1].time.values, bins=interval_bounds,
color=red, alpha=0.5, lw=0,
label='Uncensored');
ax.hist(df[df.event == 0].time.values, bins=interval_bounds,
color=blue, alpha=0.5, lw=0,
label='Censored');
ax.set_xlim(0, interval_bounds[-1]);
ax.set_xlabel('Months since mastectomy');
ax.set_yticks([0, 1, 2, 3]);
ax.set_ylabel('Number of observations');
ax.legend();
last_period = np.floor((df.time - 0.01) / interval_length).astype(int)
death = np.zeros((n_patients, n_intervals))
death[patients, last_period] = df.event
exposure = np.greater_equal.outer(df.time, interval_bounds[:-1]) * interval_length
exposure[patients, last_period] = df.time - interval_bounds[last_period]
SEED = 5078864 # from random.org
with pm.Model() as model:
lambda0 = pm.Gamma('lambda0', 0.01, 0.01, shape=n_intervals)
beta = pm.Normal('beta', 0, sd=1000)
lambda_ = pm.Deterministic('lambda_', T.outer(T.exp(beta * df.metastized), lambda0))
mu = pm.Deterministic('mu', exposure * lambda_)
obs = pm.Poisson('obs', mu, observed=death)
n_samples = 1000
with model:
trace_ = pm.sample(n_samples,random_seed=SEED)
trace = trace_[100:]
np.exp(trace['beta'].mean())
pm.plot_posterior(trace, varnames=['beta'], color='#87ceeb');
pm.autocorrplot(trace, varnames=['beta']);
base_hazard = trace['lambda0']
met_hazard = trace['lambda0'] * np.exp(np.atleast_2d(trace['beta']).T)
def cum_hazard(hazard):
return (interval_length * hazard).cumsum(axis=-1)
def survival(hazard):
return np.exp(-cum_hazard(hazard))
def plot_with_hpd(x, hazard, f, ax, color=None, label=None, alpha=0.05):
mean = f(hazard.mean(axis=0))
percentiles = 100 * np.array([alpha / 2., 1. - alpha / 2.])
hpd = np.percentile(f(hazard), percentiles, axis=0)
ax.fill_between(x, hpd[0], hpd[1], color=color, alpha=0.25)
ax.step(x, mean, color=color, label=label);
fig, (hazard_ax, surv_ax) = plt.subplots(ncols=2, sharex=True, sharey=False, figsize=(16, 6))
plot_with_hpd(interval_bounds[:-1], base_hazard, cum_hazard,
hazard_ax, color=blue, label='Had not metastized')
plot_with_hpd(interval_bounds[:-1], met_hazard, cum_hazard,
hazard_ax, color=red, label='Metastized')
hazard_ax.set_xlim(0, df.time.max());
hazard_ax.set_xlabel('Months since mastectomy');
hazard_ax.set_ylabel(r'Cumulative hazard $\Lambda(t)$');
hazard_ax.legend(loc=2);
plot_with_hpd(interval_bounds[:-1], base_hazard, survival,
surv_ax, color=blue)
plot_with_hpd(interval_bounds[:-1], met_hazard, survival,
surv_ax, color=red)
surv_ax.set_xlim(0, df.time.max());
surv_ax.set_xlabel('Months since mastectomy');
surv_ax.set_ylabel('Survival function $S(t)$');
fig.suptitle('Bayesian survival model');
with pm.Model() as time_varying_model:
lambda0 = pm.Gamma('lambda0', 0.01, 0.01, shape=n_intervals)
beta = GaussianRandomWalk('beta', tau=1., shape=n_intervals)
lambda_ = pm.Deterministic('h', lambda0 * T.exp(T.outer(T.constant(df.metastized), beta)))
mu = pm.Deterministic('mu', exposure * lambda_)
obs = pm.Poisson('obs', mu, observed=death)
with time_varying_model:
time_varying_trace_ = pm.sample(n_samples, random_seed=SEED)
time_varying_trace = time_varying_trace_[100:]
pm.forestplot(time_varying_trace, varnames=['beta']);
fig, ax = plt.subplots(figsize=(8, 6))
beta_hpd = np.percentile(time_varying_trace['beta'], [2.5, 97.5], axis=0)
beta_low = beta_hpd[0]
beta_high = beta_hpd[1]
ax.fill_between(interval_bounds[:-1], beta_low, beta_high,
color=blue, alpha=0.25);
beta_hat = time_varying_trace['beta'].mean(axis=0)
ax.step(interval_bounds[:-1], beta_hat, color=blue);
ax.scatter(interval_bounds[last_period[(df.event.values == 1) & (df.metastized == 1)]],
beta_hat[last_period[(df.event.values == 1) & (df.metastized == 1)]],
c=red, zorder=10, label='Died, cancer metastized');
ax.scatter(interval_bounds[last_period[(df.event.values == 0) & (df.metastized == 1)]],
beta_hat[last_period[(df.event.values == 0) & (df.metastized == 1)]],
c=blue, zorder=10, label='Censored, cancer metastized');
ax.set_xlim(0, df.time.max());
ax.set_xlabel('Months since mastectomy');
ax.set_ylabel(r'$\beta_j$');
ax.legend();
tv_base_hazard = time_varying_trace['lambda0']
tv_met_hazard = time_varying_trace['lambda0'] * np.exp(np.atleast_2d(time_varying_trace['beta']))
fig, ax = plt.subplots(figsize=(8, 6))
ax.step(interval_bounds[:-1], cum_hazard(base_hazard.mean(axis=0)),
color=blue, label='Had not metastized');
ax.step(interval_bounds[:-1], cum_hazard(met_hazard.mean(axis=0)),
color=red, label='Metastized');
ax.step(interval_bounds[:-1], cum_hazard(tv_base_hazard.mean(axis=0)),
color=blue, linestyle='--', label='Had not metastized (time varying effect)');
ax.step(interval_bounds[:-1], cum_hazard(tv_met_hazard.mean(axis=0)),
color=red, linestyle='--', label='Metastized (time varying effect)');
ax.set_xlim(0, df.time.max() - 4);
ax.set_xlabel('Months since mastectomy');
ax.set_ylim(0, 2);
ax.set_ylabel(r'Cumulative hazard $\Lambda(t)$');
ax.legend(loc=2);
fig, (hazard_ax, surv_ax) = plt.subplots(ncols=2, sharex=True, sharey=False, figsize=(16, 6))
plot_with_hpd(interval_bounds[:-1], tv_base_hazard, cum_hazard,
hazard_ax, color=blue, label='Had not metastized')
plot_with_hpd(interval_bounds[:-1], tv_met_hazard, cum_hazard,
hazard_ax, color=red, label='Metastized')
hazard_ax.set_xlim(0, df.time.max());
hazard_ax.set_xlabel('Months since mastectomy');
hazard_ax.set_ylim(0, 2);
hazard_ax.set_ylabel(r'Cumulative hazard $\Lambda(t)$');
hazard_ax.legend(loc=2);
plot_with_hpd(interval_bounds[:-1], tv_base_hazard, survival,
surv_ax, color=blue)
plot_with_hpd(interval_bounds[:-1], tv_met_hazard, survival,
surv_ax, color=red)
surv_ax.set_xlim(0, df.time.max());
surv_ax.set_xlabel('Months since mastectomy');
surv_ax.set_ylabel('Survival function $S(t)$');
fig.suptitle('Bayesian survival model with time varying effects');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fortunately, statsmodels.datasets makes it quite easy to load a number of data sets from R.
Step2: Each row represents observations from a woman diagnosed with breast cancer that underwent a mastectomy. The column time represents the time (in months) post-surgery that the woman was observed. The column event indicates whether or not the woman died during the observation period. The column metastized represents whether the cancer had metastized prior to surgery.
Step3: Just over 40% of our observations are censored. We visualize the observed durations and indicate which observations are censored below.
Step4: When an observation is censored (df.event is zero), df.time is not the subject's survival time. All we can conclude from such a censored observation is that the subject's true survival time exceeds df.time.
Step5: We see how deaths and censored observations are distributed in these intervals.
Step6: With the prior distributions on $\beta$ and $\lambda_0(t)$ chosen, we now show how the model may be fit using MCMC simulation with pymc3. The key observation is that the piecewise-constant proportional hazard model is closely related to a Poisson regression model. (The models are not identical, but their likelihoods differ by a factor that depends only on the observed data and not the parameters $\beta$ and $\lambda_j$. For details, see Germán Rodríguez's WWS 509 course notes.) A sketch of this likelihood link is given after the step list below.
Step7: We also define $t_{i, j}$ to be the amount of time the $i$-th subject was at risk in the $j$-th interval.
Step8: Finally, denote the risk incurred by the $i$-th subject in the $j$-th interval as $\lambda_{i, j} = \lambda_j \exp(\mathbf{x}_i \beta)$.
Step9: We now sample from the model.
Step10: We see that the hazard rate for subjects whose cancer has metastized is about double the rate of those whose cancer has not metastized.
Step11: We now examine the effect of metastization on both the cumulative hazard and on the survival function.
Step12: We see that the cumulative hazard for metastized subjects increases more rapidly initially (through about seventy months), after which it increases roughly in parallel with the baseline cumulative hazard.
Step13: We proceed to sample from this model.
Step14: We see from the plot of $\beta_j$ over time below that initially $\beta_j > 0$, indicating an elevated hazard rate due to metastization, but that this risk declines as $\beta_j < 0$ eventually.
Step15: The coefficients $\beta_j$ begin declining rapidly around one hundred months post-mastectomy, which seems reasonable, given that only three of twelve subjects whose cancer had metastized lived past this point died during the study.
|
13,284
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import lxmls.readers.sentiment_reader as srs
from lxmls.deep_learning.utils import AmazonData
corpus = srs.SentimentCorpus("books")
data = AmazonData(corpus=corpus)
from lxmls.deep_learning.utils import Model, glorot_weight_init
import numpy as np
import torch
from torch.autograd import Variable
class PytorchLogLinear(Model):
def __init__(self, **config):
# Initialize parameters
weight_shape = (config['input_size'], config['num_classes'])
# after Xavier Glorot et al
self.weight = glorot_weight_init(weight_shape, 'softmax')
self.bias = np.zeros((1, config['num_classes']))
self.learning_rate = config['learning_rate']
# IMPORTANT: Cast to pytorch format
self.weight = Variable(torch.from_numpy(self.weight).float(), requires_grad=True)
self.bias = Variable(torch.from_numpy(self.bias).float(), requires_grad=True)
        # Instantiate softmax and negative log-likelihood in log domain
self.logsoftmax = torch.nn.LogSoftmax(dim=1)
self.loss = torch.nn.NLLLoss()
def _log_forward(self, input=None):
        """Forward pass of the computation graph in logarithm domain (pytorch)."""
# IMPORTANT: Cast to pytorch format
input = Variable(torch.from_numpy(input).float(), requires_grad=False)
# Linear transformation
        z = torch.matmul(input, self.weight) + self.bias  # weight is (input_size, num_classes), so no transpose is needed
# Softmax implemented in log domain
log_tilde_z = self.logsoftmax(z)
# NOTE that this is a pytorch class!
return log_tilde_z
def predict(self, input=None):
        """Most probable class index."""
log_forward = self._log_forward(input).data.numpy()
return np.argmax(np.exp(log_forward), axis=1)
def update(self, input=None, output=None):
        """Stochastic Gradient Descent update."""
# IMPORTANT: Class indices need to be casted to LONG
true_class = Variable(torch.from_numpy(output).long(), requires_grad=False)
# Compute negative log-likelihood loss
loss = self.loss(self._log_forward(input), true_class)
# Use autograd to compute the backward pass.
loss.backward()
# SGD update
self.weight.data -= self.learning_rate * self.weight.grad.data
self.bias.data -= self.learning_rate * self.bias.grad.data
# Zero gradients
self.weight.grad.data.zero_()
self.bias.grad.data.zero_()
return loss.data.numpy()
model = PytorchLogLinear(
input_size=corpus.nr_features,
num_classes=2,
learning_rate=0.05
)
# Hyper-parameters
num_epochs = 10
batch_size = 30
# Get batch iterators for train and test
train_batches = data.batches('train', batch_size=batch_size)
test_set = data.batches('test', batch_size=None)[0]
# Epoch loop
for epoch in range(num_epochs):
# Batch loop
for batch in train_batches:
model.update(input=batch['input'], output=batch['output'])
# Prediction for this epoch
hat_y = model.predict(input=test_set['input'])
# Evaluation
accuracy = 100*np.mean(hat_y == test_set['output'])
# Inform user
print("Epoch %d: accuracy %2.2f %%" % (epoch+1, accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step4: Train Log Linear in Pytorch
Step5: Once you understand the model you can instantiate it and run it using the standard training loop we have used on previous exercises.
|
13,285
|
<ASSISTANT_TASK:>
Python Code:
# Useful additional packages
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from math import pi
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import available_backends, execute, register, get_backend
from qiskit.tools.visualization import circuit_drawer
from qiskit.tools.qi.qi import state_fidelity
from qiskit import Aer
backend = Aer.get_backend('unitary_simulator')
q = QuantumRegister(1)
qc = QuantumCircuit(q)
qc.u3(pi/2,pi/2,pi/2,q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.u2(pi/2,pi/2,q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.u1(pi/2,q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.u0(pi/2,q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.iden(q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.x(q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.y(q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.z(q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.h(q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.s(q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.sdg(q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.t(q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.tdg(q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.rx(pi/2,q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.ry(pi/2,q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.rz(pi/2,q)
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
q = QuantumRegister(2)
qc = QuantumCircuit(q)
qc.cx(q[0],q[1])
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.cy(q[0],q[1])
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.cz(q[0],q[1])
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.ch(q[0],q[1])
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary']/(0.707+0.707j), 3)
qc = QuantumCircuit(q)
qc.crz(pi/2,q[0],q[1])
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.cu1(pi/2,q[0], q[1])
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.cu3(pi/2, pi/2, pi/2, q[0], q[1])
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.swap(q[0], q[1])
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
q = QuantumRegister(3)
qc = QuantumCircuit(q)
qc.ccx(q[0], q[1], q[2])
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
qc = QuantumCircuit(q)
qc.cswap(q[0], q[1], q[2])
circuit_drawer(qc)
job = execute(qc, backend)
np.round(job.result().get_data(qc)['unitary'], 3)
q = QuantumRegister(1)
c = ClassicalRegister(1)
qc = QuantumCircuit(q, c)
qc.measure(q, c)
circuit_drawer(qc)
backend = Aer.get_backend('qasm_simulator')
job = execute(qc, backend, shots=1024)
job.result().get_counts(qc)
qc = QuantumCircuit(q, c)
qc.h(q)
qc.measure(q, c)
circuit_drawer(qc)
job = execute(qc, backend, shots=1024)
job.result().get_counts(qc)
qc = QuantumCircuit(q, c)
qc.reset(q[0])
qc.measure(q, c)
circuit_drawer(qc)
job = execute(qc, backend, shots=1024)
job.result().get_counts(qc)
qc = QuantumCircuit(q, c)
qc.h(q)
qc.reset(q[0])
qc.measure(q, c)
circuit_drawer(qc)
job = execute(qc, backend, shots=1024)
job.result().get_counts(qc)
qc = QuantumCircuit(q, c)
qc.x(q[0]).c_if(c, 0)
qc.measure(q,c)
circuit_drawer(qc)
job = execute(qc, backend, shots=1024)
job.result().get_counts(qc)
qc = QuantumCircuit(q, c)
qc.h(q)
qc.measure(q,c)
qc.x(q[0]).c_if(c, 0)
qc.measure(q,c)
circuit_drawer(qc)
job = execute(qc, backend, shots=1024)
job.result().get_counts(qc)
# Initializing a three-qubit quantum state
import math
desired_vector = [
1 / math.sqrt(16) * complex(0, 1),
1 / math.sqrt(8) * complex(1, 0),
1 / math.sqrt(16) * complex(1, 1),
0,
0,
1 / math.sqrt(8) * complex(1, 2),
1 / math.sqrt(16) * complex(1, 0),
0]
q = QuantumRegister(3)
qc = QuantumCircuit(q)
qc.initialize(desired_vector, [q[0],q[1],q[2]])
backend = Aer.get_backend('statevector_simulator')
job = execute(qc, backend)
qc_state = job.result().get_statevector(qc)
qc_state
state_fidelity(desired_vector,qc_state)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Single Qubit Quantum states
Step2: u gates
Step3: The $u2(\phi, \lambda) =u3(\pi/2, \phi, \lambda)$ has the matrix form
Step4: The $u1(\lambda)= u3(0, 0, \lambda)$ gate has the matrix form
Step5: The $u0(\delta)= u3(0, 0, 0)$ gate is the identity matrix. It has the matrix form
Step6: Identity gate
Step7: Pauli gates
Step8: $Y$
Step9: $Z$
Step10: Clifford gates
Step11: $S$ (or, $\sqrt{Z}$ phase) gate
Step12: $S^{\dagger}$ (or, conjugate of $\sqrt{Z}$ phase) gate
Step13: $C3$ gates
Step14: $T^{\dagger}$ (or, conjugate of $\sqrt{S}$ phase) gate
Step15: Standard Rotations
Step16: Rotation around Y-axis
Step17: Rotation around Z-axis
Step18: Note this is different due only to a global phase
Step19: Controlled Pauli Gates
Step20: Controlled $Y$ gate
Step21: Controlled $Z$ (or, controlled Phase) gate
Step22: Controlled Hadamard gate
Step23: Controlled rotation gates
Step24: Controlled phase rotation
Step25: I THINK SHOULD BE CALLED $C_\mathrm{PHASE}(\lambda)$
Step26: Controlled $u3$ rotation
Step27: NOTE I NEED TO FIX THIS AND DECIDE ON CONVENTION - I ACTUALLY THINK WE WANT A FOUR PARAMETER GATE AND JUST CALL IT CU AND TO REMOVE THIS GATE.
Step28: Three-qubit gates
Step29: Controlled swap gate (Fredkin Gate)
Step30: Non unitary operations
Step31: Measurements
Step32: The simulator predicts that 100 percent of the time the classical register returns 0.
Step33: The simulator predicts that 50 percent of the time the classical register returns 0 or 1.
Step34: Here we see that for both of these circuits the simulator always predicts that the output is 100 percent in the 0 state.
Step35: Here the classical bit always takes the value 0 so the qubit state is always flipped.
Step36: Here the classical bit by the first measurement is random but the conditional operation results in the qubit being deterministically put into $|1\rangle$.
Step37: Fidelity is useful to check whether two states are the same or not.
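For pure states, state_fidelity reduces to the squared overlap $|\langle\psi|\phi\rangle|^2$; a short numpy cross-check (a sketch, reusing the vectors defined above) is:
import numpy as np
psi = np.array(desired_vector)
phi = np.array(qc_state)
print(np.abs(np.vdot(psi, phi))**2)  # should be close to the state_fidelity value above (about 1.0)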
|
13,286
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
# Read data, sort by year & month
dateparse = lambda x: pd.datetime.strptime(x, '%Y%m%d')
noaa_monthly = pd.read_csv('mpls-noaa.csv', index_col=2,
parse_dates=True, date_parser=dateparse, na_values=-9999)
noaa_monthly = noaa_monthly.groupby([noaa_monthly.index.year, noaa_monthly.index.month]).sum()
# Sum seasonal totals
winter_vars = ['MNTM','EMNT','DT00','DX32','HTDD','MXSD','EMXP','TSNW','DP10']
year_start = 1938
year_end = 2014
season_start = 11 #November
season_end = 3 #March
noaa_winters = pd.concat(
[noaa_monthly.loc[(year, season_start):(year+1, season_end), winter_vars].sum(axis=0)
for year in range(year_start, year_end+1)], axis=1).transpose()
noaa_winters.index = range(year_start, year_end+1)
# Fix variables that should have been handled differently
noaa_winters['TSNW'] /= 24.4
for year in noaa_winters.index:
noaa_winters.loc[year, 'MNTM'] = \
noaa_monthly.loc[(year, season_start):(year+1, season_end), 'MNTM'].mean() * 0.18 + 32
noaa_winters.loc[year, 'EMNT'] = \
noaa_monthly.loc[(year, season_start):(year+1, season_end), 'EMNT'].min() * 0.18 + 32
noaa_winters.loc[year, 'MXSD'] = \
noaa_monthly.loc[(year, season_start):(year+1, season_end), 'MXSD'].max() / 24.4
noaa_winters.loc[year, 'EMXP'] = \
noaa_monthly.loc[(year, season_start):(year+1, season_end), 'EMXP'].max() / 24.4
acronym = { 'HTDD': 'Heating degree days',
'DP10': 'Number of days with greater than or equal to 1.0 inch of precipitation',
'MXSD': 'Maximum snow depth, inches',
'EMXP': 'Extreme maximum daily precipitation, inches',
'DT00': 'Number days with minimum temperature less than or equal to 0.0 F',
'DX32': 'Number days with maximum temperature less than or equal to 32.0 F',
'EMNT': 'Extreme minimum daily temperature',
'TSNW': 'Total snow fall, inches',
'MNTM': 'Mean temperature (F)'}
# Plot variables
import matplotlib.pyplot as plt
%matplotlib inline
for v in noaa_winters.columns:
noaa_winters[v].plot(figsize=(13,3), color='skyblue');
# Compute rolling mean, use linear interpolation for the data gap in TSNW
noaa_winters[v].interpolate().rolling(15).mean().plot(color='blue')
plt.title(acronym[v])
plt.legend(["observed data", "15-year rolling average"], loc='best')
plt.show()
# Find the best & worst for each variable
winter_coldest = pd.Series(index=noaa_winters.columns)
winter_warmest = pd.Series(index=noaa_winters.columns)
# For these variables, big is bad
for v in ['HTDD','MXSD','EMXP','DT00','DX32','TSNW','DP10']:
winter_coldest[v] = noaa_winters[v].max()
winter_warmest[v] = noaa_winters[v].min()
# For these variables, small (or negative) is bad
for v in ['MNTM','EMNT']:
winter_coldest[v] = noaa_winters[v].min()
winter_warmest[v] = noaa_winters[v].max()
# Assign scores to each year
winter_score = 100 * (noaa_winters-winter_warmest).abs() / (winter_coldest-winter_warmest).abs()
badness = winter_score.mean(axis=1)
# Plot the Badness Index
badness.plot(figsize=(13,6), marker='s', color='skyblue', xticks=badness.index[2::5])
badness.rolling(15).mean().plot(color='blue')
plt.title("Badness Index of each Minneapolis winter")
plt.ylabel("Badness index")
plt.xlabel("Year (start of winter)")
plt.legend(["Computed Badness", "15-year rolling average"], loc="best")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Definition of variables
Step2: The Badness Index of each winter
|
13,287
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np          # used below for reshaping image rows
import matplotlib.pyplot as plt
import random               # used below for sampling image indices
import os
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata('MNIST original', data_home='datasets/')
# Convert sklearn 'datasets bunch' object to Pandas DataFrames
y = pd.Series(mnist.target).astype('int').astype('category')
X = pd.DataFrame(mnist.data)
X.shape, y.shape
# Change column-names in X to reflect that they are pixel values
num_images = X.shape[1]
X.columns = ['pixel_'+str(x) for x in range(num_images)]
# print first row of X
X.head(1)
X_values = pd.Series(X.values.ravel())
print(" min: {}, \n max: {}, \n mean: {}, \n median: {}, \n most common value: {}".format(X_values.min(),
X_values.max(),
X_values.mean(),
X_values.median(),
X_values.value_counts().idxmax()))
len(np.unique(X.values))
# First row is first image
first_image = X.loc[0,:]
first_label = y[0]
# 784 columns correspond to 28x28 image
plottable_image = np.reshape(first_image.values, (28, 28))
# Plot the image
plt.imshow(plottable_image, cmap='gray_r')
plt.title('Digit Label: {}'.format(first_label))
plt.show()
images_to_plot = 9
random_indices = random.sample(range(70000), images_to_plot)
sample_images = X.loc[random_indices, :]
sample_labels = y.loc[random_indices]
plt.clf()
plt.style.use('seaborn-muted')
fig, axes = plt.subplots(3,3,
figsize=(5,5),
sharex=True, sharey=True,
subplot_kw=dict(adjustable='box-forced', aspect='equal')) #https://stackoverflow.com/q/44703433/1870832
for i in range(images_to_plot):
# axes (subplot) objects are stored in 2d array, accessed with axes[row,col]
subplot_row = i//3
subplot_col = i%3
ax = axes[subplot_row, subplot_col]
# plot image on subplot
plottable_image = np.reshape(sample_images.iloc[i,:].values, (28,28))
ax.imshow(plottable_image, cmap='gray_r')
ax.set_title('Digit Label: {}'.format(sample_labels.iloc[i]))
ax.set_xbound([0,28])
plt.tight_layout()
plt.show()
y.value_counts(normalize=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Shape, Summary Stats
Step2: Below we see min, max, mean and most-common pixel-intensity values for our rows/images. As suggested by the first row above, our most common value is 0. In fact even the median is 0, which means over half of our pixels are background/blank space. Makes sense.
Step3: We might wonder if there are only a few distinct pixel values present in the data (e.g. black, white, and a few shades of grey), but in fact we have all 256 values between our min-max of 0-255
Step4: Viewing the Digit Images
Step5: And here's a few more...
Step6: Final Wrap-up
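A possible next step (not part of the original notebook; the subsample size and model choice are arbitrary) is a quick baseline fit to confirm the raw pixel values are predictive:
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
idx = y.sample(5000, random_state=0).index          # random subsample to keep it fast
X_tr, X_te, y_tr, y_te = train_test_split(X.loc[idx], y.loc[idx], test_size=0.2, random_state=0)
clf = LogisticRegression(max_iter=200)
clf.fit(X_tr, y_tr)
print("baseline accuracy:", clf.score(X_te, y_te))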
|
13,288
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Eric Larson <larson.eric.d@gmail.com>
# Chris Holdgraf <choldgraf@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from mayavi import mlab
import mne
from mne.viz import plot_alignment, snapshot_brain_montage
print(__doc__)
mat = loadmat(mne.datasets.misc.data_path() + '/ecog/sample_ecog.mat')
ch_names = mat['ch_names'].tolist()
elec = mat['elec'] # electrode positions given in meters
dig_ch_pos = dict(zip(ch_names, elec))
mon = mne.channels.DigMontage(dig_ch_pos=dig_ch_pos)
print('Created %s channel positions' % len(ch_names))
info = mne.create_info(ch_names, 1000., 'ecog', montage=mon)
subjects_dir = mne.datasets.sample.data_path() + '/subjects'
fig = plot_alignment(info, subject='sample', subjects_dir=subjects_dir,
surfaces=['pial'])
mlab.view(200, 70)
# We'll once again plot the surface, then take a snapshot.
fig_scatter = plot_alignment(info, subject='sample', subjects_dir=subjects_dir,
surfaces='pial')
mlab.view(200, 70)
xy, im = snapshot_brain_montage(fig_scatter, mon)
# Convert from a dictionary to array to plot
xy_pts = np.vstack([xy[ch] for ch in info['ch_names']])
# Define an arbitrary "activity" pattern for viz
activity = np.linspace(100, 200, xy_pts.shape[0])
# This allows us to use matplotlib to create arbitrary 2d scatterplots
_, ax = plt.subplots(figsize=(10, 10))
ax.imshow(im)
ax.scatter(*xy_pts.T, c=activity, s=200, cmap='coolwarm')
ax.set_axis_off()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's load some ECoG electrode locations and names, and turn them into
Step2: Now that we have our electrode positions in MRI coordinates, we can create
Step3: We can then plot the locations of our electrodes on our subject's brain.
Step4: Sometimes it is useful to make a scatterplot for the current figure view.
|
13,289
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# Import neurom module
import neurom as nm
# Import neurom visualization module
from neurom import viewer
# Load a single morphology
neuron = nm.load_neuron('../test_data/valid_set/Neuron.swc')
# Load a population of morphologies from a set of files
pop = nm.load_neurons('../test_data/valid_set/')
# Get a single morphology from the population
single_neuron = pop.neurons[0]
# Visualize a morphology in two dimensions
fig, ax = viewer.draw(neuron)
# Visualize a morphology in three dimensions
fig, ax = viewer.draw(neuron, mode='3d')
# Visualize a single tree in three dimensions
fig, ax = viewer.draw(neuron.neurites[0], mode='3d')
# Visualize the dendrogram of a morphology
fig, ax = viewer.draw(neuron, mode='dendrogram')
# Extract the total number of neurites (basal and apical dendrites, and axons)
number_of_neurites = nm.get('number_of_neurites', neuron)
# Extract the total number of sections
number_of_sections = nm.get('number_of_sections', neuron)
# Extract the soma radius
soma_radius = neuron.soma.radius
# Extract the number of sections per neurite
number_of_sections_per_neurite = nm.get('number_of_sections_per_neurite', neuron)
# Print result
print("Neuron id : {0} \n\
Number of neurites : {1} \n\
Soma radius : {2:.2f} \n\
Number of sections : {3}".format(neuron.name, number_of_neurites[0], soma_radius, number_of_sections[0]))
print()
print("Neurite type \t\t\t| Number of sections")
for i, neurite in enumerate(neuron.neurites):
print("{0:31} | {1}".format(str(neurite.type), number_of_sections_per_neurite[i]))
# Extract the lengths of the sections
section_lengths = nm.get('section_lengths', neuron)
# Extract the lengths of the segments
segment_lengths = nm.get('segment_lengths', neuron)
# Extract the local bifurcation angles
local_bif_angles = nm.get('local_bifurcation_angles', neuron)
# Extract the remote bifurcation angles
remote_bif_angles = nm.get('remote_bifurcation_angles', neuron)
# Extract the radial distances of the sections
section_radial_distances = nm.get('section_radial_distances', neuron)
# Extract the path distances of the sections
section_path_distances = nm.get('section_path_distances', neuron)
# Print result
features = (segment_lengths, section_lengths, local_bif_angles,
remote_bif_angles, section_path_distances, section_radial_distances)
def check(feature_list, n):
return '{0:.2f}'.format(feature_list[n]) if n < len(feature_list) else ''
print('|sg_len|sc_len|lc_bif_angles|rm_bif_angles|sc_path_dists|sc_rad_dists|')
for n in range(0, 50):
args = (check(f, n) for f in features)
print('|{0:^6}|{1:^6}|{2:^13}|{3:^13}|{4:^13}|{5:^12}|'.format(*args))
# Extract the section lengths of axonal trees
ax_section_lengths = nm.get('section_lengths', neuron, neurite_type=nm.AXON)
# Extract the section lengths of basal dendrite trees
ba_section_lengths = nm.get('section_lengths', neuron, neurite_type=nm.BASAL_DENDRITE)
# Extract the section lengths of apical dendrite trees
ap_section_lengths = nm.get('section_lengths', neuron, neurite_type=nm.APICAL_DENDRITE)
print('\nAxonal section lengths = ', ax_section_lengths)
print('\nBasal section lengths = ', ba_section_lengths)
print('\nApical section lengths = ', ap_section_lengths)
import numpy as np
# We can get the mean section length
mean_sl = np.mean(section_lengths)
# We can get the standard deviation of the section lengths
std_sl = np.std(section_lengths)
# We can get the minimum section length
min_sl = np.min(section_lengths)
# ... and the maximum section length
max_sl = np.max(section_lengths)
print('Section length statistics:')
print(' [mean, std] = [{0:.2f}, {1:.2f}]'.format(mean_sl, std_sl))
print(' [min, max]: [{0:.2f}, {1:.2f}]'.format(min_sl, max_sl))
import matplotlib.pyplot as plt
# Select the feature of choice
feature = nm.get('segment_lengths', neuron)
# Create empty figure
fig = plt.figure(figsize=(11,3))
# Create histogram
ax = fig.add_subplot('131')
ax.hist(feature, bins=25, edgecolor='black')
# Create cumulative histogram
ax = fig.add_subplot('132')
ax.hist(feature, bins=25, cumulative=True, edgecolor='black')
# Create boxplot; flier points are indicated with green dots
ax = fig.add_subplot('133')
_ = ax.boxplot(feature, sym='g.')
from neurom import stats
data = nm.get('segment_lengths', neuron)
# Let’s start with a normal distribution. We will fit the data that we extracted above with a normal distribution
p = stats.fit(data, distribution='norm')
# The output of the function is a named tuple of type FitResults
print('Fit output type : ', type(p))
# The parameters are stored in the variable params, which in the case of the normal distribution stores the mu and sigma
# of the normal distribution
mu, sigma = p.params
ks_dist, pvalue = p.errs
# Print result
print('[mu, sigma] : [{0:.2f}, {1:.2f}]\n'.format(mu, sigma))
# We need to check the statistical error of the performed fit to evaluate the accuracy of the
# selected model. To do so we use the errors variable of FitResults:
print('Kolmogorov-Smirnov distance : {0:.2f}'.format(ks_dist))
print('P-value : {0:.2f}'.format(pvalue))
from scipy.stats import norm
# Create a histogram as above
fig = plt.figure()
plt.hist(data, bins=25, density=True, edgecolor='black')
# Plot range: 5 standard deviations around the mean
norm_range = np.arange(mu - 5.*sigma, mu + 5.*sigma, 0.001)
# Plot the normal pdf with the given range, mu and sigma
_ = plt.plot(norm_range, norm.pdf(norm_range, mu, sigma), linewidth=3., c='r', alpha=0.8)
p = stats.optimal_distribution(data, distr_to_check=('lognorm', 'logistic', 'norm'))
print('Fit results:', p)
# Threshold value
threshold = 10
# Get the ids of sections which length exceeds the threshold
selected_ids = np.where(section_lengths > threshold)
# Get the values of section lengths that exceed the threshold
section_lengths[selected_ids]
# Get the length of all sections with a radial distance between 0.0 and 60.0
section_indices = np.where((section_radial_distances >= 0.0) & (section_radial_distances < 60.0))
selected_section_lengths = section_lengths[section_indices]
print(selected_section_lengths)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Loading a morphology or a population
Step2: 2. Morphology visualization
Step3: 3. Morphology analysis
Step4: 3.2 Analyze different types of trees
Step5: 3.3 Perform statistical analysis on extracted measurements
Step6: 3.4 Generate plots from the extracted morphometrics
Step7: 3.5 Fit the extracted data with a statistical distribution
Step8: The result of the fitting can be visualized
Step9: It is also possible to find the optimal distribution that best fits the data, among a number of distributions that are
Step10: 3.6 Apply more advanced manipulation on extracted data
Step11: 3.7 Combine morphometrics
|
13,290
|
<ASSISTANT_TASK:>
Python Code:
import re     # used by tokenize below
import math   # used by spam_probability below
def tokenize(message):
message = message.lower()
all_words = re.findall('[a-z0-9]+', message)
return set(all_words)
from collections import defaultdict
def count_words(training_set):
"""training set consists of pairs (message, is_spam)"""
counts = defaultdict(lambda: [0, 0])
for message, is_spam in training_set:
for word in tokenize(message):
counts[word][0 if is_spam else 1] += 1
return counts
def word_probabilities(counts, total_spams, total_non_spams, k=0.5):
"""turn the word_counts into a list of triplets
w, p(w | spam) and p(w | ~spam)"""
return [(w,
(spam + k) / (total_spams + 2 * k),
(non_spam + k) / (total_non_spams + 2 * k))
for w, (spam, non_spam) in counts.items()]
def spam_probability(word_probs, message):
message_words = tokenize(message)
log_prob_if_spam = log_prob_if_not_spam = 0.0
# iterate through each word in our vocabulary
for word, prob_if_spam, prob_if_not_spam in word_probs:
# if *word* appears in the message,
# add the log probability of seeing it
if word in message_words:
log_prob_if_spam += math.log(prob_if_spam)
log_prob_if_not_spam += math.log(prob_if_not_spam)
# if *word* doesn't appear in the message
# add the log probability of _not_ seeing it
# which is log(1 - probability of seeing it)
else:
log_prob_if_spam += math.log(1.0 - prob_if_spam)
log_prob_if_not_spam += math.log(1.0 - prob_if_not_spam)
prob_if_spam = math.exp(log_prob_if_spam)
prob_if_not_spam = math.exp(log_prob_if_not_spam)
return prob_if_spam / (prob_if_spam + prob_if_not_spam)
class NaiveBayesClassifier:
def __init__(self, k=0.5):
self.k = k
self.word_probs = []
def train(self, training_set):
# count spam and non-spam messages
num_spams = len([is_spam for message, is_spam in training_set if is_spam])
num_non_spams = len(training_set) - num_spams
# run training data through our "pipeline"
word_counts = count_words(training_set)
self.word_probs = word_probabilities(word_counts, num_spams, num_non_spams, self.k)
def classify(self, message):
return spam_probability(self.word_probs, message)
import glob, re
import random
# modify the path with wherever you've put the files
path = r'../../data/*/*'
data = []
# glob.glob returns every filename that matches the wildcarded path
for fn in glob.glob(path):
is_spam = "ham" not in fn
with open(fn,'r') as file:
try:
for line in file:
if line.startswith("Subject:"):
# remove the leading "Subject: " and keep what's left
subject = re.sub(r"^Subject: ", "", line).strip()
data.append((subject, is_spam))
except UnicodeDecodeError:
pass
random.seed(0) # just so you get the same answers as me
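# split_data is defined in an earlier chapter of the book and is not shown in this
# notebook; a minimal stand-in (assumed to match the usual definition: a random split
# that puts each row in the first list with probability `prob`) is included here so
# the next line runs.
def split_data(data, prob):
    results = [], []
    for row in data:
        results[0 if random.random() < prob else 1].append(row)
    return results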
train_data, test_data = split_data(data, 0.75)
classifier = NaiveBayesClassifier()
classifier.train(train_data)
from collections import Counter
# triplets (subject, actual is_spam, predicted spam probability)
classified = [(subject, is_spam, classifier.classify(subject))
for subject, is_spam in test_data]
# assume that spam_probability > 0.5 corresponds to spam prediction
# and count the combinations of (actual is_spam, predicted is_spam)
counts = Counter((is_spam, spam_probability > 0.5)
for _, is_spam, spam_probability in classified)
counts
# sort by spam_probability from smallest to largest
classified.sort(key=lambda row: row[2])
# the highest predicted spam probabilities among the non-spams
spammiest_hams = list(filter(lambda row: not row[1], classified))[-5:]
# the lowest predicted spam probabilities among the actual spams
hammiest_spams = list(filter(lambda row: row[1], classified))[:5]
spammiest_hams
hammiest_spams
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Spam Filter Implementation
Step3: Download the following files and extract them into a folder which we will reference below
|
13,291
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from astropy.table import Table, join
import matplotlib.pyplot as plt
%matplotlib inline
idp = "idata/main_pdf_v0.8-b"
idp_old = "idata/main_pdf_v0.8"
lnew = Table.read(idp+"/lofar_m5.fits")
lold = Table.read(idp_old+"/lofar_m5.fits")
merged = join(lnew, lold, keys=["Source_Name"], table_names=["new", "old"])
len(lnew)
len(lold)
diff_lr = merged['lr_5_old']-merged['lr_5_new']
len(diff_lr)
plt.plot(merged['lr_5_new'], diff_lr, marker=",", ls="")
plt.xlabel("LR (new)")
plt.ylabel("LR diff")
plt.loglog(merged['lr_5_new'], np.abs(diff_lr), marker=",", ls="")
plt.xlabel("log LR (new)")
plt.ylabel("log abs. LR diff")
told = 0.69787
tnew = 0.71684
cond_sel = ((merged['lr_5_old'] >= told) | (merged['lr_5_new'] >= tnew))
cond_nosel = ((merged['lr_5_old'] < told) & (merged['lr_5_new'] < tnew))
plt.semilogy(merged['lr_5_new'][cond_sel], (np.abs(diff_lr)/merged['lr_5_new']*100)[cond_sel], marker=",", ls="")
plt.semilogy(merged['lr_5_new'][cond_nosel], (np.abs(diff_lr)/merged['lr_5_new']*100)[cond_nosel], marker=",", ls="")
plt.xlabel("LR (new)")
plt.ylabel("log percent abs. LR diff")
cond_sel = ((merged['lr_5_old'] >= told) | (merged['lr_5_new'] >= tnew)) & (~np.isnan(merged['category_old']))
cond_nosel = ((merged['lr_5_old'] < told) & (merged['lr_5_new'] < tnew)) & (~np.isnan(merged['category_old']))
cond_change = ((merged['lr_5_old'] >= told) & (merged['lr_5_new'] < tnew)) & (~np.isnan(merged['category_old']))
plt.loglog(merged['lr_5_new'][cond_sel], (np.abs(diff_lr)/merged['lr_5_new']*100)[cond_sel], marker=",", ls="", label="Selected")
plt.loglog(merged['lr_5_new'][cond_nosel], (np.abs(diff_lr)/merged['lr_5_new']*100)[cond_nosel], marker=",", ls="", label="Rejected")
plt.loglog(merged['lr_5_new'][cond_change], (np.abs(diff_lr)/merged['lr_5_new']*100)[cond_change], marker="x", ls="", label="Changes")
plt.xlabel("log LR (new)")
plt.ylabel("log percent abs. LR diff")
plt.legend()
# Use new threshold for old data as well
cond_sel = ((merged['lr_5_old'] >= tnew) | (merged['lr_5_new'] >= tnew))
cond_nosel = ((merged['lr_5_old'] < tnew) & (merged['lr_5_new'] < tnew))
cond_change = ((merged['lr_5_old'] >= tnew) & (merged['lr_5_new'] < tnew))
plt.loglog(merged['lr_5_new'][cond_sel], (np.abs(diff_lr)/merged['lr_5_new']*100)[cond_sel], marker=",", ls="", label="Selected")
plt.loglog(merged['lr_5_new'][cond_nosel], (np.abs(diff_lr)/merged['lr_5_new']*100)[cond_nosel], marker=",", ls="", label="Rejected")
plt.loglog(merged['lr_5_new'][cond_change], (np.abs(diff_lr)/merged['lr_5_new']*100)[cond_change], marker="x", ls="", label="Changes")
plt.xlabel("log LR (new)")
plt.ylabel("log percent abs. LR diff")
plt.legend()
cond_change = ((merged['lr_5_old'] >= told) & (merged['lr_5_new'] < tnew))
plt.subplot(1,2,1)
plt.semilogx(merged['lr_5_new'][cond_change], merged['lr_5_old'][cond_change], marker="x", ls="")
plt.xlabel("log LR (new)")
plt.ylabel("LR (old)")
plt.subplot(1,2,2)
plt.plot(merged['lr_5_new'][cond_change], merged['lr_5_old'][cond_change], marker="x", ls="")
plt.xlabel("LR (new)")
plt.ylabel("LR (old)")
np.sum((merged['lr_5_old'] >= told) & (merged['lr_5_new'] >= tnew))
np.sum((merged['lr_5_old'] >= told) & (merged['lr_5_new'] < tnew))
np.sum((merged['lr_5_old'] < told) & (merged['lr_5_new'] >= tnew))
np.sum((merged['lr_5_old'] < told) & (merged['lr_5_new'] < tnew))
4/91332*100
cond_change = ((merged['lr_5_old'] >= told) & (merged['lr_5_new'] < tnew) & (np.abs(diff_lr)/merged['lr_5_new']*100 > 10))
merged[cond_change]
cond = (np.abs(diff_lr)/merged['lr_5_new']*100 > 10)
merged[cond]
# ['Source_Name','ML_LR_new','AllWISE_new',
# 'objID_new','category_new','W1mag_new','i_new','lr_index_5_new',
# 'lr_dist_5_new','lr_5_new','lr_index_sel_5_new','ML_LR_old','AllWISE_old',
# 'objID_old','category_old','W1mag_old','i_old','lr_index_5_old',
# 'lr_dist_5_old','lr_5_old','lr_index_sel_5_old']
cond = (np.abs(diff_lr)/merged['lr_5_new']*100 > 1)
merged[cond & (merged['lr_5_old'] >= told)]['Source_Name','ML_LR_new','AllWISE_new',
'objID_new','category_new','W1mag_new','i_new','lr_index_5_new',
'lr_dist_5_new','lr_5_new','lr_index_sel_5_new','ML_LR_old','AllWISE_old',
'objID_old','category_old','W1mag_old','i_old','lr_index_5_old',
'lr_dist_5_old','lr_5_old','lr_index_sel_5_old']
cond = (np.abs(diff_lr)/merged['lr_5_new']*100 > 1)
merged[cond & (merged['lr_5_old'] >= told) & (merged['lr_index_5_old'] != merged['lr_index_5_new'])]['Source_Name','ML_LR_new','AllWISE_new',
'objID_new','category_new','W1mag_new','i_new','lr_index_5_new',
'lr_dist_5_new','lr_5_new','lr_index_sel_5_new','ML_LR_old','AllWISE_old',
'objID_old','category_old','W1mag_old','i_old','lr_index_5_old',
'lr_dist_5_old','lr_5_old','lr_index_sel_5_old']
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the data
Step2: Check the shape of the data
Step3: Compute the difference between the LR
Step4: Explore the differences
Step5: Plot of the absolute difference in LR with respect to the new LR in log space.
Step6: Thresholds
Step7: Plot of the percentage of absolute difference in LR with respect to the new LR with the y axis in log space. Sources always below the threshold are plotted in orange.
Step8: Plot of the percentage of absolute difference in LR with respect to the new LR in log space. Sources always below the threshold are plotted in orange and the galaxies that are not matched now are plotted as green crosses.
Step9: The same plot as before, but using the new threshold for the old data as well
Step10: Direct comparison of the LR of the sources that changed. Most of them are close to the threshold change but in 4 cases there are big differences.
Step11: Number of sources with respect to the thresholds
Step12: The percentage of changes due to the bug
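A compact cross-count of the selection decision under the old and new LR (a sketch using the thresholds defined above) summarises the four cases in one loop:
old_sel = merged['lr_5_old'] >= told
new_sel = merged['lr_5_new'] >= tnew
for o in (True, False):
    for n in (True, False):
        print("old selected = {0}, new selected = {1}: {2}".format(o, n, np.sum((old_sel == o) & (new_sel == n))))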
|
13,292
|
<ASSISTANT_TASK:>
Python Code:
import requests
import json
#import ibmseti
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import tensorflow as tf
import pickle
import time
#!sudo pip install sklearn
import os
from sklearn.metrics import confusion_matrix
from sklearn import metrics
### SET YOUR TEAM NAME HERE! Use this folder to save intermediate results
team_name = 'Saeed_team'
mydatafolder = os.path.join( os.environ['PWD'], team_name ) #Change my_data_folder to your team name
if os.path.exists(mydatafolder) is False:
os.makedirs(mydatafolder)
print mydatafolder
!wget --output-document SETI.zip https://ibm.box.com/shared/static/jhqdhcblhua5dx2t7ixwm88okitjrl6l.zip
!unzip -o SETI.zip
import SETI
ds_directory = mydatafolder + '/SETI/SETI_ds_64x128/'
print os.popen("ls -lrt "+ ds_directory).read() # to verify
#from tensorflow.examples.tutorials.mnist import input_data
#dataset = input_data.read_data_sets("MNIST_data/", one_hot=True)
dataset = SETI.read_data_sets(ds_directory, one_hot=True, validation_size=0)
dataset.train.images.shape
# Parameters
decay_rate=0.96
decay_steps=1000
learning_rate = 0.005
training_epochs = 200
batch_size = 50
display_step = 100
#check point directory
chk_directory = mydatafolder+'/save/'
checkpoint_path = chk_directory+'model.ckpt'
n_classes = 4 # number of possible classifications for the problem
dropout = 0.50 # Dropout, probability to keep units
height = 64 # height of the image in pixels
width = 128 # width of the image in pixels
n_input = width * height # number of pixels in one image
x = tf.placeholder(tf.float32, shape=[None, n_input])
y_ = tf.placeholder(tf.float32, shape=[None, n_classes])
x_image = tf.reshape(x, [-1,height,width,1])
x_image
W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
b_conv1 = tf.Variable(tf.constant(0.1, shape=[32])) # need 32 biases for 32 outputs
convolve1 = tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1
h_conv1 = tf.nn.relu(convolve1)
conv1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') #max_pool_2x2
conv1
W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1))
b_conv2 = tf.Variable(tf.constant(0.1, shape=[64])) #need 64 biases for 64 outputs
convolve2= tf.nn.conv2d(conv1, W_conv2, strides=[1, 1, 1, 1], padding='SAME')+ b_conv2
h_conv2 = tf.nn.relu(convolve2)
conv2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 4, 4, 1], padding='SAME') #max_pool_2x2
conv2
input_layer = conv2
dim = input_layer.get_shape().as_list()
dim
dims= dim[1]*dim[2]*dim[3]
nodes1 = 1024
prv_layer_matrix = tf.reshape(input_layer, [-1, dims])
W_fc1 = tf.Variable(tf.truncated_normal([dims, nodes1], stddev=0.1))
b_fc1 = tf.Variable(tf.constant(0.1, shape=[nodes1])) # need 1024 biases for 1024 outputs
h_fcl1 = tf.matmul(prv_layer_matrix, W_fc1) + b_fc1
fc_layer1 = tf.nn.relu(h_fcl1) # ???
fc_layer1
keep_prob = tf.placeholder(tf.float32)
layer_drop1 = tf.nn.dropout(fc_layer1, keep_prob)
W_fc = tf.Variable(tf.truncated_normal([nodes1, n_classes], stddev=0.1)) #1024 neurons
b_fc = tf.Variable(tf.constant(0.1, shape=[n_classes])) # 10 possibilities for classes [0,1,2,3]
fc = tf.matmul(layer_drop1, W_fc) + b_fc
y_CNN= tf.nn.softmax(fc)
# Note: softmax_cross_entropy_with_logits expects the raw (pre-softmax) logits, so pass fc rather than the softmaxed y_CNN
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=fc, labels=y_))
# Create a variable to track the global step.
global_step = tf.Variable(0, trainable=False)
# create learning_decay
lr = tf.train.exponential_decay( learning_rate,
global_step,
decay_steps,
decay_rate, staircase=True )
# Use the optimizer to apply the gradients that minimize the loss
# (and also increment the global step counter) as a single training step.
optimizer = tf.train.GradientDescentOptimizer(lr)
train_op = optimizer.minimize(cross_entropy, global_step=global_step)
#train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_CNN,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
directory = os.path.dirname(chk_directory)
try:
os.stat(directory)
ckpt = tf.train.get_checkpoint_state(chk_directory)
print ckpt
except:
os.mkdir(directory)
# Initializing the variables
init = tf.global_variables_initializer()
loss_values = []
with tf.Session() as sess:
X_test = dataset.test.images
y_test = dataset.test.labels
sess.run(init)
saver = tf.train.Saver(tf.global_variables())
# load previously trained model if appilcable
ckpt = tf.train.get_checkpoint_state(chk_directory)
if ckpt:
print "loading model: ",ckpt.model_checkpoint_path
#saver.restore(sess, ckpt.model_checkpoint_path)
#step = 0
num_examples = dataset.train.num_examples
# Training cycle
for epoch in range(training_epochs):
avg_loss = 0.
avg_accuracy = 0.
#dataset.shuffle_data()
total_batch = int(num_examples / batch_size)
# Loop over all batches
start = time.time()
for step in range(total_batch):
x_batch, y_batch = dataset.train.next_batch(batch_size,shuffle=True)
train_op.run(feed_dict={x: x_batch, y_: y_batch, keep_prob: dropout})
loss, acc = sess.run([cross_entropy, accuracy], feed_dict={x: x_batch,y_: y_batch,keep_prob: 1.})
avg_loss += loss / total_batch
avg_accuracy += acc / total_batch
if step % display_step == 1000:
# Calculate batch loss and accuracy
loss, acc = sess.run([cross_entropy, accuracy], feed_dict={x: x_batch,y_: y_batch,keep_prob: 1.})
#train_accuracy = accuracy.eval(feed_dict={x:x_batch, y_: y_batch, keep_prob: 0.5})
test_accuracy = sess.run(accuracy, feed_dict={x: X_test[0:100], y_: y_test[0:100], keep_prob: 1.})
print("Iter " + str(step) + \
", Minibatch Loss= " + "{:.6f}".format(loss) + \
", Training Accuracy= " + "{:.5f}".format(acc) + \
", Test Accuracy= " + "{:.5f}".format(test_accuracy) )
# save model every 1 epochs
if epoch >= 0 and epoch % 1 == 0:
# Save model
#print ("model saved to {}".format(checkpoint_path))
#saver.save(sess, checkpoint_path, global_step = epoch)
end = time.time()
plr = sess.run(lr)
loss_values.append(avg_loss)
#print(sess.run(tf.train.global_step()))
print "Epoch:", '%04d' % (epoch+1) , ", Epoch time=" , "{:.5f}".format(end - start) , ", lr=", "{:.9f}".format(plr), ", cost=", "{:.9f}".format(avg_loss) ,", Acc=", "{:.9f}".format(avg_accuracy)
print("Optimization Finished!")
print ("model saved to {}".format(checkpoint_path))
saver.save(sess, checkpoint_path, global_step = (epoch+1)*step)
# Calculate accuracy for test images
#print("Testing Accuracy:", sess.run(accuracy, feed_dict={x: X_test[0:30], y_: y_test[0:30], keep_prob: 1.}))
# Find the labels of test set
y_pred_lb = sess.run(tf.argmax(y_CNN,1), feed_dict={x: X_test[0:100], y_: y_test[0:100], keep_prob: 1.})
y_pred = sess.run(y_CNN, feed_dict={x: X_test[0:100], y_: y_test[0:100], keep_prob: 1.})
# lets save kernels
kernels_l1 = sess.run(tf.reshape(tf.transpose(W_conv1, perm=[2, 3, 0, 1]),[32,-1]))
kernels_l2 = sess.run(tf.reshape(tf.transpose(W_conv2, perm=[2, 3, 0, 1]),[32*64,-1]))
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.plot([np.mean(loss_values[i:i+5]) for i in range(len(loss_values))])
plt.show()
y_ = np.argmax(y_test[0:100],1) # ground truth
print metrics.classification_report(y_true= y_, y_pred= y_pred_lb)
print metrics.confusion_matrix(y_true= y_, y_pred= y_pred_lb)
print("Classification accuracy: %0.6f" % metrics.accuracy_score(y_true= y_, y_pred= y_pred_lb) )
print("Log Loss: %0.6f" % metrics.log_loss(y_true= y_, y_pred= y_pred, labels=range(4)) )
my_output_results = mydatafolder + '/' + 'DL_scores.csv'
with open(my_output_results, 'w') as csvfile:
np.savetxt(my_output_results, y_pred, delimiter=",")
print os.popen("ls -lrt "+ mydatafolder).read() # to verify
!wget --output-document utils1.py http://deeplearning.net/tutorial/code/utils.py
import utils1
from utils1 import tile_raster_images
#from utils import tile_raster_images
import matplotlib.pyplot as plt
from PIL import Image
%matplotlib inline
image = Image.fromarray(tile_raster_images(kernels_l1, img_shape=(5, 5) ,tile_shape=(4, 8), tile_spacing=(1, 1)))
### Plot image
plt.rcParams['figure.figsize'] = (18.0, 18.0)
imgplot = plt.imshow(image)
imgplot.set_cmap('gray')
image = Image.fromarray(tile_raster_images(kernels_l2, img_shape=(5, 5) ,tile_shape=(4, 12), tile_spacing=(1, 1)))
### Plot image
plt.rcParams['figure.figsize'] = (18.0, 18.0)
imgplot = plt.imshow(image)
imgplot.set_cmap('gray')
import numpy as np
plt.rcParams['figure.figsize'] = (5.0, 5.0)
sampleimage1 = X_test[3]
plt.imshow(np.reshape(sampleimage1,[64,128]), cmap="gray")
# Launch the graph
with tf.Session() as sess:
sess.run(init)
saver = tf.train.Saver(tf.global_variables())
# load previously trained model if appilcable
ckpt = tf.train.get_checkpoint_state(chk_directory)
if ckpt:
print "loading model: ",ckpt.model_checkpoint_path
saver.restore(sess, ckpt.model_checkpoint_path)
ActivatedUnits1 = sess.run(convolve1,feed_dict={x:np.reshape(sampleimage1,[1,64*128],order='F'),keep_prob:1.0})
plt.figure(1, figsize=(20,20))
n_columns = 3
n_rows = 3
for i in range(9):
plt.subplot(n_rows, n_columns, i+1)
plt.title('Filter ' + str(i))
plt.imshow(ActivatedUnits1[0,:,:,i], interpolation="nearest", cmap="gray")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set your team folder
Step2: Import dataset reader
Step3: Download data
Step4: Load data SETI
Step5: Network Parameters
Step6: Inputs
Step7: Convolutional Layer 1
Step8: Convolutional Layer 2
Step9: Convolutional Layer 3
Step10: Dropout 1
Step11: Fully Connected Layer 2
Step12: Loss function
Step13: Training
Step14: Evaluation
Step15: Create checkpoint directory
Step16: Training
Step17: Evaluation
Step18: Generate CSV file for Scoreboard
Step19: Viz
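An optional visualisation (not part of the original notebook) showing the confusion matrix computed above as a heatmap:
cm = metrics.confusion_matrix(y_true=y_, y_pred=y_pred_lb)
plt.rcParams['figure.figsize'] = (5.0, 5.0)
plt.imshow(cm, interpolation='nearest', cmap='Blues')
plt.colorbar()
plt.xlabel('Predicted class')
plt.ylabel('True class')
plt.title('Confusion matrix (test subset)')
plt.show()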
|
13,293
|
<ASSISTANT_TASK:>
Python Code:
import time
import math
import sys
import pickle
import copy
import os
import re
import numpy as np
from chainer import cuda, Variable, FunctionSet, optimizers
import chainer.functions as F
#-------------Explain7 in the Qiita-------------
n_epochs = 30
n_units = 625
batchsize = 100
bprop_len = 10
grad_clip = 0.5
data_dir = "data_hands_on"
checkpoint_dir = "cv"
#-------------Explain7 in the Qiita-------------
# input data
#-------------Explain1 in the Qiita-------------
def source_to_words(source):
line = source.replace("\n", " ").replace("\t", " ")
for spacer in ["(", ")", "{", "}", "[", "]", ",", ";", ":", "++", "!", "$", '"', "'"]:
line = line.replace(spacer, " " + spacer + " ")
words = [w.strip() for w in line.split()]
return words
def load_data():
vocab = {}
print ('%s/angular.js'% data_dir)
source = open('%s/angular_full_remake.js' % data_dir, 'r').read()
words = source_to_words(source)
freq = {}
dataset = np.ndarray((len(words),), dtype=np.int32)
for i, word in enumerate(words):
if word not in vocab:
vocab[word] = len(vocab)
freq[word] = 0
dataset[i] = vocab[word]
freq[word] += 1
print('corpus length:', len(words))
print('vocab size:', len(vocab))
return dataset, words, vocab, freq
#-------------Explain1 in the Qiita-------------
if not os.path.exists(checkpoint_dir):
os.mkdir(checkpoint_dir)
train_data, words, vocab, freq = load_data()
for f in ["frequent", "rarely"]:
print("{0} words".format(f))
print(sorted(freq.items(), key=lambda i: i[1], reverse=True if f == "frequent" else False)[:50])
#-------------Explain2 in the Qiita-------------
class CharRNN(FunctionSet):
    """This part defines the neural network.
    From the top: the input vocabulary-vector space is converted to the number of
    hidden units, then the hidden-layer input and the hidden layer are set up.
    The same is done for a second layer, and the output layer converts back to the
    vocabulary size. The initial parameters are drawn uniformly at random between
    -0.08 and 0.08.
    """
    def __init__(self, n_vocab, n_units):
        pass  # body to be written during the hands-on (see the class docstring above)

    def forward_one_step(self, x_data, y_data, state, train=True, dropout_ratio=0.5):
        """Forward pass.
        The inputs are wrapped in Variables, passing the input and the answer.
        The embed defined above is used for the input layer. The hidden-layer input
        uses l1_x defined above, passing dropout and the hidden-layer state as
        arguments. The first hidden-layer state and h1_in are passed to the lstm.
        The second layer is written the same way, and the output layer is defined
        without passing state. Each state is kept so it can be used for the next
        input. The predicted labels are compared with the answer labels, and the
        loss and the state are returned.
        """
        pass  # body to be written during the hands-on

    def predict(self, x_data, state):
        """Prediction method: the same as the forward pass but with dropout removed.
        dropout has a train argument and does nothing when train is False, so
        training and prediction could share one method by switching that argument;
        here they are written separately to keep the distinction explicit.
        """
        pass  # body to be written during the hands-on

def make_initial_state(n_units, batchsize=100, train=True):
    """Initializes the state."""
    pass  # body to be written during the hands-on
#-------------Explain2 in the Qiita-------------
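# One possible completion of the hands-on skeleton above (a sketch following the
# widely used chainer-char-rnn pattern for Chainer v1; the layer names and shapes are
# assumptions, not taken from the original notebook). Redefining CharRNN and
# make_initial_state here lets the training loop below run end to end.
class CharRNN(FunctionSet):
    def __init__(self, n_vocab, n_units):
        super(CharRNN, self).__init__(
            embed=F.EmbedID(n_vocab, n_units),
            l1_x=F.Linear(n_units, 4 * n_units),
            l1_h=F.Linear(n_units, 4 * n_units),
            l2_x=F.Linear(n_units, 4 * n_units),
            l2_h=F.Linear(n_units, 4 * n_units),
            l3=F.Linear(n_units, n_vocab),
        )
        for param in self.parameters:
            param[:] = np.random.uniform(-0.08, 0.08, param.shape)
    def forward_one_step(self, x_data, y_data, state, train=True, dropout_ratio=0.5):
        x = Variable(x_data, volatile=not train)
        t = Variable(y_data, volatile=not train)
        h0 = self.embed(x)
        h1_in = self.l1_x(F.dropout(h0, ratio=dropout_ratio, train=train)) + self.l1_h(state['h1'])
        c1, h1 = F.lstm(state['c1'], h1_in)
        h2_in = self.l2_x(F.dropout(h1, ratio=dropout_ratio, train=train)) + self.l2_h(state['h2'])
        c2, h2 = F.lstm(state['c2'], h2_in)
        y = self.l3(F.dropout(h2, ratio=dropout_ratio, train=train))
        new_state = {'c1': c1, 'h1': h1, 'c2': c2, 'h2': h2}
        return new_state, F.softmax_cross_entropy(y, t)
    def predict(self, x_data, state):
        x = Variable(x_data, volatile=True)
        h0 = self.embed(x)
        h1_in = self.l1_x(h0) + self.l1_h(state['h1'])
        c1, h1 = F.lstm(state['c1'], h1_in)
        h2_in = self.l2_x(h1) + self.l2_h(state['h2'])
        c2, h2 = F.lstm(state['c2'], h2_in)
        y = self.l3(h2)
        new_state = {'c1': c1, 'h1': h1, 'c2': c2, 'h2': h2}
        return new_state, F.softmax(y)
def make_initial_state(n_units, batchsize=100, train=True):
    return {name: Variable(np.zeros((batchsize, n_units), dtype=np.float32), volatile=not train)
            for name in ('c1', 'h1', 'c2', 'h2')}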
# Prepare RNNLM model
model = CharRNN(len(vocab), n_units)
optimizer = optimizers.RMSprop(lr=2e-3, alpha=0.95, eps=1e-8)
optimizer.setup(model.collect_parameters())
whole_len = train_data.shape[0]
jump = whole_len // batchsize
epoch = 0
start_at = time.time()
cur_at = start_at
state = make_initial_state(n_units, batchsize=batchsize)
accum_loss = Variable(np.zeros((), dtype=np.float32))
cur_log_perp = np.zeros(())
for i in range(int(jump * n_epochs)):
#-------------Explain4 in the Qiita-------------
x_batch = np.array([train_data[(jump * j + i) % whole_len]
for j in range(batchsize)])
y_batch = np.array([train_data[(jump * j + i + 1) % whole_len]
for j in range(batchsize)])
state, loss_i = model.forward_one_step(x_batch, y_batch, state, dropout_ratio=0.7)
accum_loss += loss_i
cur_log_perp += loss_i.data.reshape(())
if (i + 1) % bprop_len == 0: # Run truncated BPTT
now = time.time()
cur_at = now
#print('{}/{}, train_loss = {}, time = {:.2f}'.format((i + 1)/bprop_len, jump, accum_loss.data / bprop_len, now-cur_at))
optimizer.zero_grads()
accum_loss.backward()
accum_loss.unchain_backward() # truncate
accum_loss = Variable(np.zeros((), dtype=np.float32))
optimizer.clip_grads(grad_clip)
optimizer.update()
if (i + 1) % 1000 == 0:
perp = math.exp(cuda.to_cpu(cur_log_perp) / 1000)
print('iter {} training perplexity: {:.2f} '.format(i + 1, perp))
fn = ('%s/charrnn_epoch_%i.chainermodel' % (checkpoint_dir, epoch))
pickle.dump(copy.deepcopy(model).to_cpu(), open(fn, 'wb'))
cur_log_perp.fill(0)
if (i + 1) % jump == 0:
epoch += 1
#-------------Explain4 in the Qiita-------------
sys.stdout.flush()
# load model
#-------------Explain6 in the Qiita-------------
model = pickle.load(open("cv/charrnn_epoch_1.chainermodel", 'rb'))
#-------------Explain6 in the Qiita-------------
n_units = model.embed.W.shape[1]
# initialize generator
state = make_initial_state(n_units, batchsize=1, train=False)
index = np.random.randint(0, len(vocab), 1)[0]
ivocab = {v:k for k, v in vocab.items()}
sampling_range = 5
for i in range(1000):
if ivocab[index] in ["}", ";"]:
sys.stdout.write(ivocab[index] + "\n")
else:
sys.stdout.write(ivocab[index] + " ")
#-------------Explain7 in the Qiita-------------
state, prob = model.predict(np.array([index], dtype=np.int32), state)
#index = np.argmax(prob.data)
index = np.random.choice(prob.data.argsort()[0,-sampling_range:][::-1], 1)[0]
#-------------Explain7 in the Qiita-------------
print
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Typical libraries to import are listed below.
Step2: 3. Data input
Step7: 4. Recurrent neural language model setup (hands-on)
Step8: RNNLM (this configures the recurrent neural language model)
Step9: 5. Settings before starting training
Step10: 6. Parameter update method (mini-batch training)
Step11: 7. Language prediction
Step12: state, prob = model.predict(prev_char, state) retrieves the predicted probabilities and the state; the state is also kept because it is used for the next prediction.
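A hedged alternative to the uniform top-5 choice used in the sampling loop above: draw the next word in proportion to the predicted probabilities, optionally sharpened by a temperature (the function name and the small constant are assumptions):
def sample_index(prob_data, temperature=1.0):
    p = np.log(prob_data.reshape(-1) + 1e-12) / temperature
    p = np.exp(p - p.max())
    p /= p.sum()
    return np.random.choice(len(p), p=p)
# e.g. index = sample_index(prob.data) could replace the argsort-based choice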
|
13,294
|
<ASSISTANT_TASK:>
Python Code:
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# Google Cloud Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
# Install Python package dependencies.
print("Installing libraries")
! pip3 install {USER_FLAG} --quiet google-cloud-pipeline-components kfp
! pip3 install {USER_FLAG} --quiet --upgrade google-cloud-aiplatform google-cloud-bigquery
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
! gcloud config set project $PROJECT_ID
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# If on Google Cloud Notebooks, then don't execute this code
if not IS_GOOGLE_CLOUD_NOTEBOOK:
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"}
REGION = "[your-region]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "[your-bucket-name]":
BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP
BUCKET_URI = f"gs://{BUCKET_NAME}"
if REGION == "[your-region]":
REGION = "us-central1"
! gsutil mb -l $REGION -p $PROJECT_ID $BUCKET_URI
! gsutil ls -al $BUCKET_URI
SERVICE_ACCOUNT = "[your-service-account]" # @param {type:"string"}
if (
SERVICE_ACCOUNT == ""
or SERVICE_ACCOUNT is None
or SERVICE_ACCOUNT == "[your-service-account]"
):
# Get your GCP project id from gcloud
shell_output = !gcloud auth list 2>/dev/null
SERVICE_ACCOUNT = shell_output[2].replace("*", "").strip()
print("Service Account:", SERVICE_ACCOUNT)
! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_URI
! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_URI
import sys
from typing import NamedTuple
from google.cloud import aiplatform as vertex
from google.cloud import bigquery
from google_cloud_pipeline_components import \
aiplatform as vertex_pipeline_components
from google_cloud_pipeline_components.experimental import \
bigquery as bq_components
from kfp import dsl
from kfp.v2 import compiler
from kfp.v2.dsl import Artifact, Input, Metrics, Output, component
PIPELINE_JSON_PKG_PATH = "rapid_prototyping.json"
PIPELINE_ROOT = f"gs://{BUCKET_NAME}/pipeline_root"
DATA_FOLDER = f"{BUCKET_NAME}/data"
RAW_INPUT_DATA = f"gs://{DATA_FOLDER}/abalone.csv"
BQ_DATASET = "j90wipxexhrgq3cquanc5" # @param {type:"string"}
BQ_LOCATION = "US" # @param {type:"string"}
BQ_LOCATION = BQ_LOCATION.upper()
BQML_EXPORT_LOCATION = f"gs://{BUCKET_NAME}/artifacts/bqml"
DISPLAY_NAME = "rapid-prototyping"
ENDPOINT_DISPLAY_NAME = f"{DISPLAY_NAME}_endpoint"
image_prefix = REGION.split("-")[0]
BQML_SERVING_CONTAINER_IMAGE_URI = (
f"{image_prefix}-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-8:latest"
)
if os.getenv("IS_TESTING"):
!gcloud --quiet components install beta
!gcloud --quiet components update
!gcloud config set project $PROJECT_ID
!gcloud config set ai/region $REGION
! gsutil cp gs://cloud-samples-data/vertex-ai/community-content/datasets/abalone/abalone.data {RAW_INPUT_DATA}
@component(base_image="python:3.9", packages_to_install=["google-cloud-bigquery"])
def import_data_to_bigquery(
project: str,
bq_location: str,
bq_dataset: str,
gcs_data_uri: str,
raw_dataset: Output[Artifact],
table_name_prefix: str = "abalone",
):
from google.cloud import bigquery
# Construct a BigQuery client object.
client = bigquery.Client(project=project, location=bq_location)
def load_dataset(gcs_uri, table_id):
job_config = bigquery.LoadJobConfig(
schema=[
bigquery.SchemaField("Sex", "STRING"),
bigquery.SchemaField("Length", "NUMERIC"),
bigquery.SchemaField("Diameter", "NUMERIC"),
bigquery.SchemaField("Height", "NUMERIC"),
bigquery.SchemaField("Whole_weight", "NUMERIC"),
bigquery.SchemaField("Shucked_weight", "NUMERIC"),
bigquery.SchemaField("Viscera_weight", "NUMERIC"),
bigquery.SchemaField("Shell_weight", "NUMERIC"),
bigquery.SchemaField("Rings", "NUMERIC"),
],
skip_leading_rows=1,
# The source format defaults to CSV, so the line below is optional.
source_format=bigquery.SourceFormat.CSV,
)
print(f"Loading {gcs_uri} into {table_id}")
load_job = client.load_table_from_uri(
gcs_uri, table_id, job_config=job_config
) # Make an API request.
load_job.result() # Waits for the job to complete.
destination_table = client.get_table(table_id) # Make an API request.
print("Loaded {} rows.".format(destination_table.num_rows))
def create_dataset_if_not_exist(bq_dataset_id, bq_location):
print(
"Checking for existence of bq dataset. If it does not exist, it creates one"
)
dataset = bigquery.Dataset(bq_dataset_id)
dataset.location = bq_location
dataset = client.create_dataset(dataset, exists_ok=True, timeout=300)
print(f"Created dataset {dataset.full_dataset_id} @ {dataset.location}")
bq_dataset_id = f"{project}.{bq_dataset}"
create_dataset_if_not_exist(bq_dataset_id, bq_location)
raw_table_name = f"{table_name_prefix}_raw"
table_id = f"{project}.{bq_dataset}.{raw_table_name}"
print("Deleting any tables that might have the same name on the dataset")
client.delete_table(table_id, not_found_ok=True)
print("will load data to table")
load_dataset(gcs_data_uri, table_id)
raw_dataset_uri = f"bq://{table_id}"
raw_dataset.uri = raw_dataset_uri
@component(
base_image="python:3.9",
packages_to_install=["google-cloud-bigquery"],
) # pandas, pyarrow and fsspec required to export bq data to csv
def split_datasets(
raw_dataset: Input[Artifact],
bq_location: str,
) -> NamedTuple(
"bqml_split",
[
("dataset_uri", str),
("dataset_bq_uri", str),
("test_dataset_uri", str),
],
):
from collections import namedtuple
from google.cloud import bigquery
raw_dataset_uri = raw_dataset.uri
table_name = raw_dataset_uri.split("bq://")[-1]
print(table_name)
raw_dataset_uri = table_name.split(".")
print(raw_dataset_uri)
project = raw_dataset_uri[0]
bq_dataset = raw_dataset_uri[1]
bq_raw_table = raw_dataset_uri[2]
client = bigquery.Client(project=project, location=bq_location)
def split_dataset(table_name_dataset):
training_dataset_table_name = f"{project}.{bq_dataset}.{table_name_dataset}"
split_query = f"""
CREATE OR REPLACE TABLE
`{training_dataset_table_name}`
AS
SELECT
Sex,
Length,
Diameter,
Height,
Whole_weight,
Shucked_weight,
Viscera_weight,
Shell_weight,
Rings,
CASE(ABS(MOD(FARM_FINGERPRINT(TO_JSON_STRING(f)), 10)))
WHEN 9 THEN 'TEST'
WHEN 8 THEN 'VALIDATE'
ELSE 'TRAIN' END AS split_col
FROM
`{project}.{bq_dataset}.abalone_raw` f
"""
dataset_uri = f"{project}.{bq_dataset}.{bq_raw_table}"
print("Splitting the dataset")
query_job = client.query(split_query) # Make an API request.
query_job.result()
print(dataset_uri)
print(split_query.replace("\n", " "))
return training_dataset_table_name
def create_test_view(training_dataset_table_name, test_view_name="dataset_test"):
view_uri = f"{project}.{bq_dataset}.{test_view_name}"
query = f"""
CREATE OR REPLACE VIEW `{view_uri}` AS SELECT
Sex,
Length,
Diameter,
Height,
Whole_weight,
Shucked_weight,
Viscera_weight,
Shell_weight,
Rings
FROM `{training_dataset_table_name}` f
WHERE
f.split_col = 'TEST'
"""
print(query.replace("\n", " "))
query_job = client.query(query) # Make an API request.
query_job.result()
return view_uri
table_name_dataset = "dataset"
dataset_uri = split_dataset(table_name_dataset)
test_dataset_uri = create_test_view(dataset_uri)
dataset_bq_uri = "bq://" + dataset_uri
print(f"dataset: {dataset_uri}")
result_tuple = namedtuple(
"bqml_split",
["dataset_uri", "dataset_bq_uri", "test_dataset_uri"],
)
return result_tuple(
dataset_uri=str(dataset_uri),
dataset_bq_uri=str(dataset_bq_uri),
test_dataset_uri=str(test_dataset_uri),
)
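# Note on the split above (added commentary, not part of the original pipeline): hashing every
# row with FARM_FINGERPRINT and taking MOD 10 yields a deterministic, roughly 80/10/10
# TRAIN/VALIDATE/TEST assignment (bucket 9 -> TEST, bucket 8 -> VALIDATE, all others -> TRAIN),
# so re-running the component reproduces the same split without storing a random seed.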
def _query_create_model(
project_id: str,
bq_dataset: str,
training_data_uri: str,
model_name: str = "linear_regression_model_prototyping",
):
model_uri = f"{project_id}.{bq_dataset}.{model_name}"
model_options = """OPTIONS
( MODEL_TYPE='LINEAR_REG',
input_label_cols=['Rings'],
DATA_SPLIT_METHOD='CUSTOM',
DATA_SPLIT_COL='split_col'
)"""
query = f"""
CREATE OR REPLACE MODEL
`{model_uri}`
{model_options}
AS
SELECT
Sex,
Length,
Diameter,
Height,
Whole_weight,
Shucked_weight,
Viscera_weight,
Shell_weight,
Rings,
CASE(split_col)
WHEN 'TEST' THEN TRUE
ELSE
FALSE
END
AS split_col
FROM
`{training_data_uri}`;
"""
print(query.replace("\n", " "))
return query
@component(base_image="python:3.9")
def interpret_bqml_evaluation_metrics(
bqml_evaluation_metrics: Input[Artifact], metrics: Output[Metrics]
) -> dict:
import math
metadata = bqml_evaluation_metrics.metadata
for r in metadata["rows"]:
rows = r["f"]
schema = metadata["schema"]["fields"]
output = {}
for metric, value in zip(schema, rows):
metric_name = metric["name"]
val = float(value["v"])
output[metric_name] = val
metrics.log_metric(metric_name, val)
if metric_name == "mean_squared_error":
rmse = math.sqrt(val)
metrics.log_metric("root_mean_squared_error", rmse)
metrics.log_metric("framework", "BQML")
print(output)
# Inspired by Andrew Ferlitsch's work on https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage3/get_started_with_automl_pipeline_components.ipynb
@component(
base_image="python:3.9",
packages_to_install=[
"google-cloud-aiplatform",
],
)
def interpret_automl_evaluation_metrics(
region: str, model: Input[Artifact], metrics: Output[Metrics]
):
"""
For a list of available regression metrics, go here: gs://google-cloud-aiplatform/schema/modelevaluation/regression_metrics_1.0.0.yaml.
More information on available metrics for different types of models: https://cloud.google.com/vertex-ai/docs/predictions/online-predictions-automl
"""
import google.cloud.aiplatform.gapic as gapic
# Get a reference to the Model Service client
client_options = {"api_endpoint": f"{region}-aiplatform.googleapis.com"}
model_service_client = gapic.ModelServiceClient(client_options=client_options)
model_resource_name = model.metadata["resourceName"]
model_evaluations = model_service_client.list_model_evaluations(
parent=model_resource_name
)
model_evaluation = list(model_evaluations)[0]
available_metrics = [
"meanAbsoluteError",
"meanAbsolutePercentageError",
"rSquared",
"rootMeanSquaredError",
"rootMeanSquaredLogError",
]
output = dict()
for x in available_metrics:
val = model_evaluation.metrics.get(x)
output[x] = val
metrics.log_metric(str(x), float(val))
metrics.log_metric("framework", "AutoML")
print(output)
@component(base_image="python:3.9")
def select_best_model(
metrics_bqml: Input[Metrics],
metrics_automl: Input[Metrics],
thresholds_dict_str: str,
best_metrics: Output[Metrics],
reference_metric_name: str = "rmse",
) -> NamedTuple(
"Outputs",
[
("deploy_decision", str),
("best_model", str),
("metric", float),
("metric_name", str),
],
):
import json
from collections import namedtuple
best_metric = float("inf")
best_model = None
# BQML and AutoML use different metric names.
metric_possible_names = []
if reference_metric_name == "mae":
metric_possible_names = ["meanAbsoluteError", "mean_absolute_error"]
elif reference_metric_name == "rmse":
metric_possible_names = ["rootMeanSquaredError", "root_mean_squared_error"]
metric_bqml = float("inf")
metric_automl = float("inf")
print(metrics_bqml.metadata)
print(metrics_automl.metadata)
for x in metric_possible_names:
try:
metric_bqml = metrics_bqml.metadata[x]
print(f"Metric bqml: {metric_bqml}")
except:
print(f"{x} does not exist in the BQML dictionary")
try:
metric_automl = metrics_automl.metadata[x]
print(f"Metric automl: {metric_automl}")
except:
print(f"{x} does not exist in the AutoML dictionary")
# Change condition if higher is better.
print(f"Comparing BQML ({metric_bqml}) vs AutoML ({metric_automl})")
if metric_bqml <= metric_automl:
best_model = "bqml"
best_metric = metric_bqml
best_metrics.metadata = metrics_bqml.metadata
else:
best_model = "automl"
best_metric = metric_automl
best_metrics.metadata = metrics_automl.metadata
thresholds_dict = json.loads(thresholds_dict_str)
deploy = False
# Change condition if higher is better.
if best_metric < thresholds_dict[reference_metric_name]:
deploy = True
if deploy:
deploy_decision = "true"
else:
deploy_decision = "false"
print(f"Which model is best? {best_model}")
print(f"What metric is being used? {reference_metric_name}")
print(f"What is the best metric? {best_metric}")
print(f"What is the threshold to deploy? {thresholds_dict_str}")
print(f"Deploy decision: {deploy_decision}")
Outputs = namedtuple(
"Outputs", ["deploy_decision", "best_model", "metric", "metric_name"]
)
return Outputs(
deploy_decision=deploy_decision,
best_model=best_model,
metric=best_metric,
metric_name=reference_metric_name,
)
@component(base_image="python:3.9", packages_to_install=["google-cloud-aiplatform"])
def validate_infrastructure(
endpoint: Input[Artifact],
) -> NamedTuple(
"validate_infrastructure_output", [("instance", str), ("prediction", float)]
):
import json
from collections import namedtuple
from google.cloud import aiplatform
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value
def treat_uri(uri):
return uri[uri.find("projects/") :]
def request_prediction(endp, instance):
instance = json_format.ParseDict(instance, Value())
instances = [instance]
parameters_dict = {}
parameters = json_format.ParseDict(parameters_dict, Value())
response = endp.predict(instances=instances, parameters=parameters)
print("deployed_model_id:", response.deployed_model_id)
print("predictions: ", response.predictions)
# The predictions are a google.protobuf.Value representation of the model's predictions.
predictions = response.predictions
for pred in predictions:
if type(pred) is dict and "value" in pred.keys():
# AutoML predictions
prediction = pred["value"]
elif type(pred) is list:
# BQML Predictions return different format
prediction = pred[0]
return prediction
endpoint_uri = endpoint.uri
treated_uri = treat_uri(endpoint_uri)
instance = {
"Sex": "M",
"Length": 0.33,
"Diameter": 0.255,
"Height": 0.08,
"Whole_weight": 0.205,
"Shucked_weight": 0.0895,
"Viscera_weight": 0.0395,
"Shell_weight": 0.055,
}
instance_json = json.dumps(instance)
print("Will use the following instance: " + instance_json)
endpoint = aiplatform.Endpoint(treated_uri)
prediction = request_prediction(endpoint, instance)
result_tuple = namedtuple(
"validate_infrastructure_output", ["instance", "prediction"]
)
return result_tuple(instance=str(instance_json), prediction=float(prediction))
pipeline_params = {
"project": PROJECT_ID,
"region": REGION,
"gcs_input_file_uri": RAW_INPUT_DATA,
"bq_dataset": BQ_DATASET,
"bq_location": BQ_LOCATION,
"bqml_model_export_location": BQML_EXPORT_LOCATION,
"bqml_serving_container_image_uri": BQML_SERVING_CONTAINER_IMAGE_URI,
"endpoint_display_name": ENDPOINT_DISPLAY_NAME,
"thresholds_dict_str": '{"rmse": 2.5}',
}
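# (Added note) "thresholds_dict_str" above gates deployment: the winning model is deployed only
# if its RMSE on the evaluation data is below 2.5 rings; adjust this value to tighten or relax
# the deployment condition checked in select_best_model.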
@dsl.pipeline(name=DISPLAY_NAME, description="Rapid Prototyping")
def train_pipeline(
project: str,
gcs_input_file_uri: str,
region: str,
bq_dataset: str,
bq_location: str,
bqml_model_export_location: str,
bqml_serving_container_image_uri: str,
endpoint_display_name: str,
thresholds_dict_str: str,
):
# Imports data to BigQuery using a custom component.
import_data_to_bigquery_op = import_data_to_bigquery(
project, bq_location, bq_dataset, gcs_input_file_uri
)
raw_dataset = import_data_to_bigquery_op.outputs["raw_dataset"]
# Splits the BQ dataset using a custom component.
split_datasets_op = split_datasets(raw_dataset, bq_location=bq_location)
# Generates the query to create a BQML using a static function.
create_model_query = _query_create_model(
project, bq_dataset, split_datasets_op.outputs["dataset_uri"]
)
# Builds BQML model using pre-built-component.
bqml_create_op = bq_components.BigqueryCreateModelJobOp(
project=project, location=bq_location, query=create_model_query
)
bqml_model = bqml_create_op.outputs["model"]
# Gathers BQML evaluation metrics using a pre-built-component.
bqml_evaluate_op = bq_components.BigqueryEvaluateModelJobOp(
project=project, location=bq_location, model=bqml_model
)
bqml_eval_metrics_raw = bqml_evaluate_op.outputs["evaluation_metrics"]
# Analyzes evaluation BQML metrics using a custom component.
interpret_bqml_evaluation_metrics_op = interpret_bqml_evaluation_metrics(
bqml_evaluation_metrics=bqml_eval_metrics_raw
)
bqml_eval_metrics = interpret_bqml_evaluation_metrics_op.outputs["metrics"]
# Exports the BQML model to a GCS bucket using a pre-built-component.
bqml_export_op = bq_components.BigqueryExportModelJobOp(
project=project,
location=bq_location,
model=bqml_model,
model_destination_path=bqml_model_export_location,
).after(bqml_evaluate_op)
bqml_exported_gcs_path = bqml_export_op.outputs["exported_model_path"]
# Uploads the recently exported BQML model from GCS into Vertex AI using a pre-built component.
bqml_model_upload_op = vertex_pipeline_components.ModelUploadOp(
project=project,
location=region,
display_name=DISPLAY_NAME + "_bqml",
artifact_uri=bqml_exported_gcs_path,
serving_container_image_uri=bqml_serving_container_image_uri,
)
bqml_vertex_model = bqml_model_upload_op.outputs["model"]
# Creates a Vertex AI Tabular dataset using a pre-built-component.
dataset_create_op = vertex_pipeline_components.TabularDatasetCreateOp(
project=project,
location=region,
display_name=DISPLAY_NAME,
bq_source=split_datasets_op.outputs["dataset_bq_uri"],
)
# Trains an AutoML Tables model using a pre-built-component.
automl_training_op = vertex_pipeline_components.AutoMLTabularTrainingJobRunOp(
project=project,
location=region,
display_name=f"{DISPLAY_NAME}_automl",
optimization_prediction_type="regression",
optimization_objective="minimize-rmse",
predefined_split_column_name="split_col",
dataset=dataset_create_op.outputs["dataset"],
target_column="Rings",
column_transformations=[
{"categorical": {"column_name": "Sex"}},
{"numeric": {"column_name": "Length"}},
{"numeric": {"column_name": "Diameter"}},
{"numeric": {"column_name": "Height"}},
{"numeric": {"column_name": "Whole_weight"}},
{"numeric": {"column_name": "Shucked_weight"}},
{"numeric": {"column_name": "Viscera_weight"}},
{"numeric": {"column_name": "Shell_weight"}},
{"numeric": {"column_name": "Rings"}},
],
)
automl_model = automl_training_op.outputs["model"]
# Analyzes evaluation AutoML metrics using a custom component.
automl_eval_op = interpret_automl_evaluation_metrics(
region=region, model=automl_model
)
automl_eval_metrics = automl_eval_op.outputs["metrics"]
# 1) Decides which model is best (AutoML vs BQML);
# 2) Determines if the best model meets the deployment condition.
best_model_task = select_best_model(
metrics_bqml=bqml_eval_metrics,
metrics_automl=automl_eval_metrics,
thresholds_dict_str=thresholds_dict_str,
)
# If the deploy condition is True, then deploy the best model.
with dsl.Condition(
best_model_task.outputs["deploy_decision"] == "true",
name="deploy_decision",
):
# Creates a Vertex AI endpoint using a pre-built-component.
endpoint_create_op = vertex_pipeline_components.EndpointCreateOp(
project=project,
location=region,
display_name=endpoint_display_name,
)
endpoint_create_op.after(best_model_task)
# In case the BQML model is the best...
with dsl.Condition(
best_model_task.outputs["best_model"] == "bqml",
name="deploy_bqml",
):
# Deploys the BQML model (now on Vertex AI) to the recently created endpoint using a pre-built component.
model_deploy_bqml_op = (
vertex_pipeline_components.ModelDeployOp( # noqa: F841
endpoint=endpoint_create_op.outputs["endpoint"],
model=bqml_vertex_model,
deployed_model_display_name=DISPLAY_NAME + "_best_bqml",
dedicated_resources_machine_type="n1-standard-2",
dedicated_resources_min_replica_count=2,
dedicated_resources_max_replica_count=2,
traffic_split={
"0": 100
}, # newly deployed model gets 100% of the traffic
).set_caching_options(False)
)
# Sends an online prediction request to the recently deployed model using a custom component.
validate_infrastructure(
endpoint=endpoint_create_op.outputs["endpoint"]
).set_caching_options(False).after(model_deploy_bqml_op)
# In case the AutoML model is the best...
with dsl.Condition(
best_model_task.outputs["best_model"] == "automl",
name="deploy_automl",
):
# Deploys the AutoML model to the recently created endpoint using a pre-built component.
model_deploy_automl_op = (
vertex_pipeline_components.ModelDeployOp( # noqa: F841
endpoint=endpoint_create_op.outputs["endpoint"],
model=automl_model,
deployed_model_display_name=DISPLAY_NAME + "_best_automl",
dedicated_resources_machine_type="n1-standard-2",
dedicated_resources_min_replica_count=2,
dedicated_resources_max_replica_count=2,
traffic_split={
"0": 100
}, # newly deployed model gets 100% of the traffic
).set_caching_options(False)
)
# Sends an online prediction request to the recently deployed model using a custom component.
validate_infrastructure(
endpoint=endpoint_create_op.outputs["endpoint"]
).set_caching_options(False).after(model_deploy_automl_op)
compiler.Compiler().compile(
pipeline_func=train_pipeline,
package_path=PIPELINE_JSON_PKG_PATH,
)
vertex.init(project=PROJECT_ID, location=REGION)
pipeline_job = vertex.PipelineJob(
display_name=DISPLAY_NAME,
template_path=PIPELINE_JSON_PKG_PATH,
pipeline_root=PIPELINE_ROOT,
parameter_values=pipeline_params,
enable_caching=False,
)
response = pipeline_job.submit()
pipeline_job.wait()
vertex.init(project=PROJECT_ID, location=REGION)
delete_bucket = False
print("Will delete endpoint")
endpoints = vertex.Endpoint.list(
filter=f"display_name={DISPLAY_NAME}_endpoint", order_by="create_time"
)
endpoint = endpoints[0]
endpoint.undeploy_all()
vertex.Endpoint.delete(endpoint)
print("Deleted endpoint:", endpoint)
print("Will delete models")
suffix_list = ["bqml", "automl", "best"]
for suffix in suffix_list:
try:
model_display_name = f"{DISPLAY_NAME}_{suffix}"
print("Will delete model with name " + model_display_name)
models = vertex.Model.list(
filter=f"display_name={model_display_name}", order_by="create_time"
)
model = models[0]
vertex.Model.delete(model)
print("Deleted model:", model)
except Exception as e:
print(e)
print("Will delete Vertex dataset")
datasets = vertex.TabularDataset.list(
filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
)
dataset = datasets[0]
vertex.TabularDataset.delete(dataset)
print("Deleted Vertex dataset:", dataset)
pipelines = vertex.PipelineJob.list(
filter=f"pipeline_name={DISPLAY_NAME}", order_by="create_time"
)
pipeline = pipelines[0]
vertex.PipelineJob.delete(pipeline)
print("Deleted pipeline:", pipeline)
# Construct a BigQuery client object.
bq_client = bigquery.Client(project=PROJECT_ID, location=BQ_LOCATION)
# TODO(developer): Set dataset_id to the ID of the dataset to fetch.
dataset_id = f"{PROJECT_ID}.{BQ_DATASET}"
print(f"Will delete BQ dataset '{dataset_id}' from location {BQ_LOCATION}.")
# Use the delete_contents parameter to delete a dataset and its contents.
# Use the not_found_ok parameter to not receive an error if the dataset has already been deleted.
bq_client.delete_dataset(
dataset_id, delete_contents=True, not_found_ok=True
) # Make an API request.
print(f"Deleted BQ dataset '{dataset_id}' from location {BQ_LOCATION}.")
if delete_bucket or os.getenv("IS_TESTING"):
! gsutil rm -r $BUCKET_URI
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Before you begin
Step2: Otherwise, set your project ID here.
Step3: Timestamp
Step4: Authenticate your Google Cloud account
Step5: Create a Cloud Storage bucket
Step6: Only if your bucket doesn't already exist
Step7: Finally, validate access to your Cloud Storage bucket by examining its contents
Step8: Service Account
Step9: Set service account access for Vertex AI Pipelines
Step10: Required imports
Step11: Determine some project and pipeline variables
Step12: Downloading the data
Step13: Pipeline Components
Step16: Split Datasets
Step19: Train BQML Model
Step20: Interpret BQML Model Evaluation
Step22: Interpret AutoML Model Evaluation
Step23: Model Selection
Step24: Validate Infrastructure
Step25: The Pipeline
Step26: Running the Pipeline
Step27: Wait for the pipeline to complete
Step28: Cleaning up
|
13,295
|
<ASSISTANT_TASK:>
Python Code:
import imaginet.defn.visual as visual
import imaginet.task
model = imaginet.task.load(path="vis/model.10.zip")
reload(visual)
emb = visual.embeddings(model)
print(emb.shape)
symb = visual.symbols(model)
print " ".join(symb.values())
%pylab inline
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
xy = pca.fit_transform(emb)
pylab.rc('font', family='DejaVu Sans')
pylab.figure(figsize=(8,8))
pylab.scatter(xy[:,0], xy[:,1], alpha=0.1)
for j,symb_j in symb.items():
if symb_j not in ["<BEG>","<END>", "<UNK>"]:
pylab.text(xy[j,0], xy[j,1], symb_j)
import imaginet.defn.lm
reload(imaginet.defn.lm)
model2 = imaginet.task.load("lm/model.10.zip")
emb2 = imaginet.defn.lm.embeddings(model2)
pca2 = PCA(n_components=2)
xy = pca2.fit_transform(emb2)
pylab.rc('font', family='DejaVu Sans')
pylab.figure(figsize=(8,8))
pylab.scatter(xy[:,0], xy[:,1], alpha=0.1)
for j,symb_j in symb.items():
if symb_j not in ["<BEG>","<END>", "<UNK>"]:
pylab.text(xy[j,0], xy[j,1], symb_j)
from imaginet.data_provider import getDataProvider
# Adjust the root to point to the directory above data
prov = getDataProvider('coco', root="..")
sents = list(prov.iterSentences(split='val'))
from imaginet.simple_data import phonemes
sents_ipa = [ phonemes(sent) for sent in sents ]
reps = imaginet.task.representation(model, sents_ipa)
from scipy.spatial.distance import cdist
distance = cdist(reps, reps, metric='cosine')
import numpy
def neighbors(k, distance=distance, size=5):
nn = numpy.argsort(distance[k,:])[1:size]
print sents[k]['raw'], ''.join(sents_ipa[k])
for n in nn:
print u"✔" if sents[n]['imgid']==sents[k]['imgid'] else u"✘", \
sents[n]['raw'], ''.join(sents_ipa[n])
import random
random.seed(41)
for _ in range(10):
neighbors(random.randint(0, len(sents)), distance=distance)
print
reload(visual)
reps2 = []
for i in range(0,len(sents_ipa),512):
r = [ ri[-1] for ri in imaginet.task.pile(model, sents_ipa[i:i+512], batch_size=256) ]
reps2.extend(r)
reps2 = numpy.array(reps2)
distance1 = cdist(reps2[:,1,:], reps2[:,1,:], metric='cosine')
distance0 = cdist(reps2[:,0,:], reps2[:,0,:], metric='cosine')
%reset_selective reps2 # Free memory
random.seed(41)
for _ in range(10):
i = random.randint(0, len(sents))
print "Layer 1"
neighbors(i, distance=distance0, size=2)
print "Layer 2"
neighbors(i, distance=distance1, size=2)
print "Layer 3"
neighbors(i, distance=distance, size=2)
print
import imaginet.tracer
tr = imaginet.tracer.Tracer()
tr.fit(reps)
tr.proj.explained_variance_
from subprocess import check_output
def espeak(words):
return phon(check_output(["espeak", "-q", "--ipa=3",
'-v', 'en',
words]).decode('utf-8'))
def phon(inp):
return [ ph.replace(u"ˈ","") for word in inp.split() for ph in word.split("_") ]
%pylab inline --no-import-all
def trace(orths, tracer=tr, model=model, eos=True, size=(6,6)):
ipas = [ espeak(orth) for orth in orths ]
states = imaginet.task.states(model, ipas)
pylab.figure(figsize=size)
tracer.traces(ipas, states, eos=eos)
trace(["A bowl of salad","A plate of pizza","A brown dog", "A black cat"])
trace(["a cow", "a baby cow","a tiny baby cow"])
orths = ["A cow","A baby","A tiny"]
ipas = [ espeak(orth) for orth in orths ]
states = imaginet.task.states(model, ipas)
states[0][0] - states[1][0]
trace(["some food on a table","a computer on a table","a table with food"])
pylab.axis('off')
trace(["a bear in a cage", "a brown bear in the zoo","a teddy bear on a chair"])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the model
Step2: Symbol embeddings
Step3: The table of IPA symbols corresponding to the 49 dimensions
Step4: Let's display the embeddings projected to 2D via PCA
Step5: Seems mostly random...
Step6: There is clear structure in the embeddings for the LM model.
Step7: Project sentences to state space
Step8: Find similar sentences in state space
Step9: Display neighbors for a sentence
Step10: State space for lower layers
Step11: What is encoded in different layers
Step12: Tracing the evolution of states
Step13: Use espeak to convert graphemes to phonemes
Step14: Plot traces of example sentences
|
13,296
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflow.compat.v1 as tf1
import shutil
def remove_dir(path):
try:
shutil.rmtree(path)
except:
pass
def add_two(input):
return input + 2
remove_dir("saved-model-builder")
with tf.Graph().as_default() as g:
with tf1.Session() as sess:
input = tf1.placeholder(tf.float32, shape=[])
output = add_two(input)
print("add two output: ", sess.run(output, {input: 3.}))
# Save with SavedModelBuilder
builder = tf1.saved_model.Builder('saved-model-builder')
sig_def = tf1.saved_model.predict_signature_def(
inputs={'input': input},
outputs={'output': output})
builder.add_meta_graph_and_variables(
sess, tags=["serve"], signature_def_map={
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: sig_def
})
builder.save()
!saved_model_cli run --dir saved-model-builder --tag_set serve \
--signature_def serving_default --input_exprs input=10
remove_dir("simple-save")
with tf.Graph().as_default() as g:
with tf1.Session() as sess:
input = tf1.placeholder(tf.float32, shape=[])
output = add_two(input)
print("add_two output: ", sess.run(output, {input: 3.}))
tf1.saved_model.simple_save(
sess, 'simple-save',
inputs={'input': input},
outputs={'output': output})
!saved_model_cli run --dir simple-save --tag_set serve \
--signature_def serving_default --input_exprs input=10
def model_fn(features, labels, mode):
output = add_two(features['input'])
step = tf1.train.get_global_step()
return tf.estimator.EstimatorSpec(
mode,
predictions=output,
train_op=step.assign_add(1),
loss=tf.constant(0.),
export_outputs={
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: \
tf.estimator.export.PredictOutput({'output': output})})
est = tf.estimator.Estimator(model_fn, 'estimator-checkpoints')
# Train for one step to create a checkpoint.
def train_fn():
return tf.data.Dataset.from_tensors({'input': 3.})
est.train(train_fn, steps=1)
# This utility function `build_raw_serving_input_receiver_fn` takes in raw
# tensor features and builds an "input serving receiver function", which
# creates placeholder inputs to the model.
serving_input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(
{'input': tf.constant(3.)}) # Pass in a dummy input batch.
estimator_path = est.export_saved_model('exported-estimator', serving_input_fn)
# Estimator's export_saved_model creates a time stamped directory. Move this
# to a set path so it can be inspected with `saved_model_cli` in the cell below.
!rm -rf estimator-model
import shutil
shutil.move(estimator_path, 'estimator-model')
!saved_model_cli run --dir estimator-model --tag_set serve \
--signature_def serving_default --input_exprs input=[10]
class MyModel(tf.Module):
@tf.function
def __call__(self, input):
return add_two(input)
model = MyModel()
@tf.function
def serving_default(input):
return {'output': model(input)}
signature_function = serving_default.get_concrete_function(
tf.TensorSpec(shape=[], dtype=tf.float32))
tf.saved_model.save(
model, 'tf2-save', signatures={
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_function})
!saved_model_cli run --dir tf2-save --tag_set serve \
--signature_def serving_default --input_exprs input=10
inp = tf.keras.Input(3)
out = add_two(inp)
model = tf.keras.Model(inputs=inp, outputs=out)
@tf.function(input_signature=[tf.TensorSpec(shape=[], dtype=tf.float32)])
def serving_default(input):
return {'output': model(input)}
model.save('keras-model', save_format='tf', signatures={
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: serving_default})
!saved_model_cli run --dir keras-model --tag_set serve \
--signature_def serving_default --input_exprs input=10
def load_tf1(path, input):
print('Loading from', path)
with tf.Graph().as_default() as g:
with tf1.Session() as sess:
meta_graph = tf1.saved_model.load(sess, ["serve"], path)
sig_def = meta_graph.signature_def[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
input_name = sig_def.inputs['input'].name
output_name = sig_def.outputs['output'].name
print(' Output with input', input, ': ',
sess.run(output_name, feed_dict={input_name: input}))
load_tf1('saved-model-builder', 5.)
load_tf1('simple-save', 5.)
load_tf1('estimator-model', [5.]) # Estimator's input must be batched.
load_tf1('tf2-save', 5.)
load_tf1('keras-model', 5.)
def load_tf2(path, input):
print('Loading from', path)
loaded = tf.saved_model.load(path)
out = loaded.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY](
tf.constant(input))['output']
print(' Output with input', input, ': ', out)
load_tf2('saved-model-builder', 5.)
load_tf2('simple-save', 5.)
load_tf2('estimator-model', [5.]) # Estimator's input must be batched.
load_tf2('tf2-save', 5.)
load_tf2('keras-model', 5.)
loaded = tf.saved_model.load('tf2-save')
print('restored __call__:', loaded.__call__)
print('output with input 5.', loaded(5))
loaded_model = tf.keras.models.load_model('keras-model')
loaded_model.predict_on_batch(tf.constant([1, 3, 4]))
# Save a simple multiplication computation:
with tf.Graph().as_default() as g:
x = tf1.placeholder(tf.float32, shape=[], name='x')
v = tf.Variable(3.0, name='v')
y = tf.multiply(x, v, name='y')
with tf1.Session() as sess:
sess.run(v.initializer)
print(sess.run(y, feed_dict={x: 5}))
s = tf1.train.Saver()
s.export_meta_graph('multiply.pb', as_text=True)
s.save(sess, 'multiply_values.ckpt')
with tf.Graph().as_default() as g:
meta = tf1.train.import_meta_graph('multiply.pb')
x = g.get_tensor_by_name('x:0')
y = g.get_tensor_by_name('y:0')
with tf1.Session() as sess:
meta.restore(sess, 'multiply_values.ckpt')
print(sess.run(y, feed_dict={x: 5}))
def import_multiply():
# Any graph-building code is allowed here.
tf1.train.import_meta_graph('multiply.pb')
# Creates a tf.function with all the imported elements in the function graph.
wrapped_import = tf1.wrap_function(import_multiply, [])
import_graph = wrapped_import.graph
x = import_graph.get_tensor_by_name('x:0')
y = import_graph.get_tensor_by_name('y:0')
# Restore the variable values.
tf1.train.Saver(wrapped_import.variables).restore(
sess=None, save_path='multiply_values.ckpt')
# Create a concrete function by pruning the wrap_function (similar to sess.run).
multiply_fn = wrapped_import.prune(feeds=x, fetches=y)
# Run this function
multiply_fn(tf.constant(5.)) # inputs to concrete functions must be Tensors.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Migrate the SavedModel workflow
Step2: TensorFlow 1
Step3: 2. Build a SavedModel for serving
Step4: 3. Export the Estimator inference graph as a SavedModel
Step5: TensorFlow 2
Step6: Save and export a SavedModel defined with Keras
Step7: Loading a SavedModel
Step8: TensorFlow 2
Step9: Models saved with the TensorFlow 2 API can also access tf.functions and variables that are attached to the model (instead of those exported as signatures). For example
Step10: TensorFlow 2
Step11: GraphDef and MetaGraphDef
Step12: Using TF1 APIs, you can use tf1.train.import_meta_graph to import the graph and restore the values
Step13: There are no TF2 APIs for loading the graph, but you can still import it into a concrete function that can be executed in eager mode
|
13,297
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import numpy.linalg as la
from scipy.stats import rankdata
%matplotlib inline
## Construct a regression model
def lm_model( X, Y, intercept = True ) :
T = np.array( Y, dtype = float )
M = np.array( X, dtype = float )
if intercept is True :
M = np.vstack( [ np.ones( len( Y ) ), M ] ).T
return (M,T, intercept)
## Define the OLS regression routine:
def lm_fit( model ) :
M, T, intercept = model
MMinv = la.inv( ## implement (X'X)^{-1} (X'Y)
np.dot( M.T, M ) )
coef = np.dot( MMinv,
np.dot( M.T, T ) )
## Estimate the residual standard deviation
resid = T - np.dot(M, coef)
dof = len( T ) - len( coef )
RSS = np.dot( resid.T, resid )
return (coef, RSS, dof, MMinv )
## ML estimator of the power law in the "tail" (x≥u):
## x_k \sim C x^{-\alpha} 1_{[u,+∞)}(x).
def mle_alpha( data, threshold ) :
## Keep the data observations, that we consider to be in the tail
tail = np.array( [ v for v in data if v >= threshold ] )
## Estimate the mean log of the peaks over threshold
sum_log = np.sum( np.log( tail ) ) / ( len( tail ) + 0.0 )
## Use the closed form expression for the value of the power at an optimum
alpha = 1.0 + 1.0 / ( sum_log - np.log( threshold ) )
## Using the delta-method compute the s.e of the estimate.
return alpha, ( alpha - 1 ) / np.sqrt( len( tail ) )
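## Quick sanity check (illustrative sketch, not part of the original analysis): draw a
## synthetic Pareto sample with a known exponent and verify that mle_alpha() recovers it.
## By inverse-transform sampling, u * U^{-1/(alpha-1)} with U ~ Uniform(0,1) follows the
## power law with density (alpha-1)/u * (x/u)^{-alpha} on [u, +inf); here u = 1, alpha = 2.5.
np.random.seed( 0 )
synthetic = ( 1.0 - np.random.uniform( size = 10000 ) ) ** ( -1.0 / ( 2.5 - 1.0 ) )
print "sanity check: true alpha = 2.5, estimated alpha = %.3f (%.3f)" % mle_alpha( synthetic, 1.0 )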
## The function below implements the same functionality as the previous one
## but instead of the continuous version it works with the discrete power law.
from scipy.special import zeta
from scipy.optimize import minimize
## The discrete power law gives marginally different results
## \Pr(N=n) \defn \frac{1}{\zeta(\gamma)} n^{-\gamma}, n -- positive integer
def mle_alpha_d( data, threshold ) :
## Keep the data observations, that we consider to be in the tail
tail = np.array( [ v for v in data if v >= threshold ] )
## Estimate the mean log of the peaks over threshold
sum_log = np.sum( np.log( tail - threshold + 1 ) ) / ( len( tail ) + 0.0 )
## Define minus log-likelihood of the discrete power law
loglik = lambda alpha : np.log( zeta( alpha ) ) + alpha * sum_log
## Compute the ML estimate of the exponent, with a view to using it as the
## initial seed for the numerical minimizer for better convergence.
res = minimize( loglik, ( 1.0 + 1.0 / sum_log, ), method = 'Nelder-Mead', options = { 'disp': False } )
## Return the "optimal" argument, regardless of its quality. Potentially DANGEROUS!
return res.x[ 0 ], float( 'nan' )
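## (Added note) A slightly safer variant would check the optimiser's convergence flag before
## trusting the estimate, e.g. by inspecting res.success inside mle_alpha_d and falling back
## to the continuous-law estimator when the Nelder-Mead search fails to converge.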
## Define a convenience function for estimating the power parameter
## of the continuous power law
from scipy.stats import kstest
def ks_dist( data, threshold ) :
## Estimate the power given the current threshold
alpha, sd = mle_alpha( data, threshold )
## Construct the CDF in the current environment
cdf = lambda x : 1.0 - ( x / threshold ) ** ( 1.0 - alpha )
## Return the output of the out-of-the-box Kolmogorov-Smirnov test:
## the infinity norm of the difference between the distribution functions.
d, pv = kstest( [ v for v in data if v >= threshold ], cdf )
return (d, pv), (alpha, sd)
def ks_dist_d( data, threshold ) :
## Estimate the power given the current threshold
alpha, sd = mle_alpha_d( data, threshold )
## Construct the CDF in the current environment
cdf = lambda k : 1.0 - zeta( alpha, k-threshold+1 ) / zeta( alpha )
## Return the output of the out-of-the box Kolmogorov-Smirnov test:
## the infinity norm of the difference between the distribution functions.
d, pv = kstest( [ v for v in data if v >= threshold ], cdf )
return (d, pv), (alpha, sd)
def values( data, frequency = False ) :
bins = dict( )
## For each value in the given array, add the index of each occurrence
## into the bin dedicated to the encountered value.
for i, x in enumerate( sorted( data ) ) :
## Prepend the current occurrence of a value, unless it has never been
## seen before, in which case initialise the list of indices for it.
bins[ x ] = bins.get( x, [] ) + [ i ]
return bins
## It was brought to my attention, that numpy.unique() does the same trick...
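## For example (equivalent sketch): np.unique with return_counts gives the same information
## as the counts() helper below, as a pair of aligned arrays instead of a list of tuples:
## vals, cnts = np.unique( data, return_counts = True )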
def counts( data ) :
## Count the number of times a value occurs in the array.
counts = dict( )
for x in data :
## If the values has not been seen yet, then initialize it to
## a single occurrence otherwise increment its counter.
counts[ x ] = counts.get( x, 0 ) + 1
return counts.items( )
## Construct the complimentary cumulative distribution function for
## the data exceedig the given tail threshold.
def ccdf( data, threshold ) :
## Count the occurrences of values over some threshold in the array
freq = np.array( counts(
[ v for v in data if v >= threshold ] ),
dtype = float )
## Sort the counts along the growing values they correspond to
freq = freq[ freq[ :, 0 ].argsort( ), : ]
## ... and compute the fraction of data with values lower than the current
freq[:,1] = 1.0 - np.cumsum( freq[ :,1 ], dtype = float ) / sum( freq[ :,1 ] )
return freq
def mean_excess( data ) :
data = np.array( sorted( data, reverse = True ) )
## Compute the last positions in the sorted array of each repeated observation
ranks = rankdata( data, method = 'max' )
## Since the array is sorted, the number of observation exceeding the current
## is givne by difference between the length of the array and the max-rank.
excesses = np.array( np.unique( len( data ) - ranks ), dtype = np.int )
## Get the last values in each group -- the thresholds
thresholds = data[ excesses ]
## Get the sum of all values greater than the current threshold
mean_excess = np.cumsum( data )[ excesses ] / ( excesses + 0.0 ) - thresholds
return np.array( zip( thresholds, mean_excess ), dtype = np.float )
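## (Added interpretation note) In extreme-value practice the mean excess plot is read as
## follows: an upward-sloping plot suggests a heavy, power-law-like tail; a roughly flat
## plot suggests an exponential tail; and a downward slope points to a short, bounded tail.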
#####################################################################
#+ 0. Load the data (yes, it is a milestone!)
## Load the word count dataset
wordcount = np.fromregex(
'./data/wordcounts.txt', r"(\d+)\s+(.{,32})",
[ ( 'freq', np.int64 ), ( 'word', 'S32' ) ] )
#####################################################################
##+ 1. Check that Zipf's Law holds
## Pre-sort the frequencies: in ascending order of frequencies
wordcount.sort( order = 'freq' )
freqs = wordcount[ 'freq' ]
## Produce ranks: from 1 up to |W|
ranks = np.arange( 1, len( wordcount ) + 1, dtype = float )[::-1]
## The probability of a word frequency being not less than the
## frequency of a given word w is exactly the ratio of w's rank
## to the total number of words.
probs = ranks / len( wordcount )
## estimate f_k\sim C k^{-\gamma} model
mdl = lm_model( np.log( ranks ), np.log( freqs ), True )
coef, rss, dof, XX = lm_fit( mdl )
## Define the fitted Zipf's law
# zipf = lambda r : np.exp( coef.dot( ( 1, np.log( r ) ) ) )
zipf = lambda r : np.exp( coef[0] + coef[1] * np.log( r ) )
## Show how well it was estimated.
plt.loglog( freqs, probs, "xr" )
plt.plot( zipf( ranks ), probs, "-b" )
plt.xlabel( "frequency" ) ; plt.ylabel( "ranks" )
plt.title( "Wordcount data" )
plt.show( )
######################################################################
##+ 2. Assuming that the data is distributed according to the Power Law, find
## * $\alpha$ of the distribution
## * mean sample variance $\sigma^2$
## Get the ML estimate
alpha_ml, alpha_ml_sd = mle_alpha( freqs, freqs.min( ) )
## Let's suppose that the rank is proportional to the complementary CDF
## of a power law: $\bar{F}(x) = {\left(\frac{x}{u}\right)}^{1-\alpha}$
## Thus the following econometric model is to be estimated:
## $\log \text{rank} \sim C + (1-\alpha) \log \text{freq} + \epsilon$
mdl = lm_model( np.log( freqs ), np.log( ranks ), True )
beta, rss, dof, XX = lm_fit( mdl )
## Transform the coefficient
alpha_ls = 1 - beta[ 1 ]
## The regression estimate of the power should be close
## to the ML estimate
print "the OLS estimate of alpha is %f\n" % alpha_ls
print "Whereas the ML estimate is %f (%f) \n" % ( alpha_ml, alpha_ml_sd )
print "Since ML is more theoretically sound, the relative error is %f%%\n" % (
100 * np.abs( 1.0 - alpha_ls / alpha_ml ), )
## The mean and the sample variance of the sample
## frequency distribution:
print "The average frequency over the sample is ", freqs.mean(), "\n"
print "The sample variance is ", freqs.var(), "\n"
## Theoretical mean and variance of the power law distribution
## significantly depend on the power parameter.
## Indeed for $x\sim \frac{\alpha-1}{u} {\left( \frac{x}{u} \right)}^{-\alpha}$ one has the following:
## $E(x) = \frac{\alpha-1}{\alpha-2} u$ if $\alpha>2$
## $E(x^2) = \frac{\alpha-1}{\alpha-3} u^2$ if $\alpha>3$
## The estimated parameter is less than 2, implying that the frequency
## distribution is unlikely to have even a finite mean under the
## assumed distribution.
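## (Illustrative check, added here) The fitted power law has a finite mean only when
## alpha > 2; print the implied theoretical mean, or a warning otherwise.
u_min = freqs.min()
if alpha_ml > 2.0 :
    print "Theoretical mean implied by the fit: %.3f" % ( u_min * ( alpha_ml - 1.0 ) / ( alpha_ml - 2.0 ) )
else :
    print "alpha_ml = %.3f <= 2: the fitted power law has no finite mean" % alpha_ml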
#####################################################################
##+ 3. Produce summary of the frequencies: min, max, mean, median
## Does it make sense to compute these summaries? What does the mean frequency tell us?
print "The minimum frequency is ", freqs.min(), "\n"
print "The mean frequency is ", freqs.mean(), "\n"
print "The median frequency is ", np.median( freqs ), "\n"
print "The maximum frequency is ", freqs.max(), "\n"
#####################################################################
## + 0. Read the graph
## Load the network routing graph first as it is the smallest. It is
## an undirected graph.
import networkx as nx
G = nx.read_edgelist( "./data/network.txt", create_using = nx.Graph( ) );
node_degree = G.degree( )
deg = np.array( node_degree.values( ), dtype = np.int )
#####################################################################
##+ 1. Do they correspond to a power law?
## First let's draw the frequency plot of the node degree distribution.
degree_freq = np.array( counts( deg ) )
deg_me = mean_excess( deg )
plt.figure( 1, figsize = ( 10, 5 ) )
plt.subplot(121)
plt.title( "Node degree frequency" )
plt.loglog( degree_freq[:,0], degree_freq[:,1], "bo" )
plt.xlabel( "degree" ) ; plt.ylabel( "frequency" )
plt.subplot(122)
## An upward trend in plot shows heavy-tailed behaviour, but the
## values for high thresholds are unreliably estimated.
plt.title( "Mean excess plot" )
plt.loglog( deg_me[:,0], deg_me[:,1], "bo-", linewidth = 2 )
plt.ylabel( "mean excess" ) ; plt.xlabel( "threshold" )
plt.show( )
## The empirical degree distribution may not correspond to a power
## law per se, but it definitely has some heavy-tailed behaviour,
## which shows itself when only the data exceeding some threshold
## is considered.
cc = ccdf( deg, 0 )
plt.title( "Degree cCDF" )
plt.xlabel( "degree" ) ; plt.ylabel( "probability" )
plt.loglog( cc[:,0], cc[:,1], "bo-", linewidth = 2 )
plt.show( )
## Clearly the chances of an extremely high node degree decay proportional
## to the value of the degree on a log-log scale.
#####################################################################
##+ 2. Find max and mean values of incoming and outgoing node degrees
## Since the network graph is undirected it does not make sense to
## distinguish in- and out- nodes. Thus let's check the range of the
## general (two-way) degree.
print "The degrees range from %d to %d" % ( min( deg ), max( deg ) ) #, "\n"
print "The average degree over the sample is %.3f" % ( G.size( ) / G.order( ) ) #, "\n"
print "The degree standard deviation is %.3f" % ( np.sqrt( np.var( deg ) ) ) #, "\n"
print "The median degree is %d" % ( np.median( deg ) ) #, "\n"
#####################################################################
##+ 3. Find $\alpha$ via Maximum Likelihood and calculate $\sigma^2$
##+ 4. Determine $x_{min}$ via Kolmogorov-Smirnov test
## We have reasons to believe there are some power law-like effects in
## the behaviour of the node degree (treated as a random variable).
## Let's pursue this lead and estimate the exponent in the power law
## and select the most likely breakpoint, beyond which the degree
## is heavy tailed.
#####################################################################
## Get the ML estimate of the exponent parameter.
alpha_ml, alpha_ml_se = mle_alpha( deg, min( deg ) )
print "The Maximum likelihood estimate of the exponent of the node degree distribution is %.3f (%.4f)\n" % ( alpha_ml, alpha_ml_se )
#####################################################################
## Run the KS threshold selection routine
thresholds = np.unique( deg )
## The ks_dist() function returns a tuple of the following parameters:
## * ( KS-distance, PV of the KS-test ), ( MLE of alpha, the standard error of the MLE )
ks_min = np.array( [ ks_dist( deg, u ) for u in thresholds ] )
## Select the x_min that brings the KS metric to its minimum on the given
## degree data. Note the first threshold is removed, since it is likely
## to yield very biased estimate.
i_min = np.argmin( ks_min[1:,0,0] )+1
x_min = thresholds[ i_min ]
alpha_ml, alpha_ml_se = ks_min[ i_min, 1, : ]
## Produce a dataset for cCDF plotting.
x = np.arange( x_min, 2 * np.max( deg ) )
deg_ccdf = ccdf( deg, x_min )
pwr_ccdf = lambda x : ( x / ( x_min + 0.0 ) ) ** ( 1.0 - alpha_ml )
## Produce the hill plot: the correspondence between the threshold
## and the estimated exponent.
plt.figure( 1, figsize = ( 10, 5 ) )
plt.subplot( 121 )
plt.title( 'The Hill plot of the degree distribution' )
plt.ylabel( 'alpha' ) ; plt.xlabel( 'threshold' )
plt.axhline( y = alpha_ml, linewidth = 1, color = 'b' )
plt.axvline( x = x_min, linewidth = 1, color = 'b', linestyle = '--' )
plt.loglog( thresholds, ks_min[:,1,0], "r-")
## In fact the KS-metric is the $L^\infty$ norm on the set of distribution
## functions.
plt.subplot( 122 )
plt.title( 'KS metric distance' )
plt.ylabel( 'max distance' ) ; plt.xlabel( 'threshold' )
plt.axhline( y = ks_min[ i_min, 0, 0 ], linewidth = 1, color = 'b' )
plt.axvline( x = x_min, linewidth = 1, color = 'b', linestyle = '--' )
plt.loglog( thresholds, ks_min[:,0,0], "r-")
plt.show( )
print "The Kolmogorov-Smirnov metric yielded %.1f as the optimal threshold\n" % ( x_min)
print "'Optimal' exponent is %.3f (%.3f)\n" % ( alpha_ml, alpha_ml_se )
plt.title( "Degree cCDF" )
plt.xlabel( "degree" ) ; plt.ylabel( "probability" )
plt.plot( x, pwr_ccdf( x ), "b-", linewidth = 2 )
plt.plot( deg_ccdf[:,0], deg_ccdf[:,1], "r-", linewidth = 2 )
plt.axvline( x = x_min, linewidth = 2, color = 'k', linestyle = '-' )
plt.show( )
#####################################################################
## + 0. Read the graph
## Load the network routing graph first as it is the smallest. It is
## an undirected graph.
import networkx as nx
G = nx.read_edgelist( "./data/fb_Princeton.txt", create_using = nx.DiGraph( ) );
node_in_degree = G.in_degree( )
node_out_degree = G.out_degree( )
in_deg = np.array( node_in_degree.values( ), dtype = np.int )
out_deg = np.array( node_out_degree.values( ), dtype = np.int )
#####################################################################
##+ 1. Do they correspond to a power law?
## First let's draw the frequency plot of the node degree distribution.
degree_in_freq = np.array( counts( in_deg ) )
degree_out_freq = np.array( counts( out_deg ) )
plt.title( "Node degree frequency" )
plt.xlabel( "degree" ) ; plt.ylabel( "frequency" )
plt.loglog( degree_out_freq[:,0], degree_out_freq[:,1], "bo" )
plt.loglog( degree_in_freq[:,0], degree_in_freq[:,1], "r<" )
plt.show( )
plt.figure( 1, figsize = ( 10, 5 ) )
plt.subplot(121)
plt.title( "Degree cCDF-loglog" )
out_cc = ccdf( out_deg, 0 )
plt.loglog( out_cc[:,0], out_cc[:,1], "bo-", linewidth = 2 )
in_cc = ccdf( in_deg, 0 )
plt.loglog( in_cc[:,0], in_cc[:,1], "r<-", linewidth = 2 )
plt.xlabel( "degree" ) ; plt.ylabel( "probability" )
plt.subplot(122)
## An upward trend in plot shows heavy-tailed behaviour, but the
## values for high thresholds are unreliably estimated.
plt.title( "Mean excess plot" )
out_me = mean_excess( out_deg )
plt.loglog( out_me[:,0], out_me[:,1], "bo-", linewidth = 2 )
in_me = mean_excess( in_deg )
plt.loglog( in_me[:,0], in_me[:,1], "r<-", linewidth = 2 )
plt.ylabel( "mean excess" ) ; plt.xlabel( "threshold" )
plt.show( )
#####################################################################
##+ 2. Find max and mean values of incoming and outgoing node degrees
print "The degrees range from %d to %d for inward direction and from %d to %d for outward edges" % ( min( in_deg ), max( in_deg ), min( out_deg ), max( out_deg ) ) #, "\n"
print "The average degree over the sample is %.3f (IN) and %.3f (OUT)" % ( np.sum( in_deg ) / ( G.order( ) + 0.0 ), np.sum( out_deg ) / ( G.order( ) + 0.0 ) ) #, "\n"
print "The degree standard deviation is %.3f for the in-degree and %.3f -- out-degree" % ( np.sqrt( np.var( in_deg ) ), np.sqrt( np.var( out_deg ) ) ) #, "\n"
print "The median in- and out-degree is %d and %d respectively" % ( np.median( in_deg ), np.median( out_deg ) ) #, "\n"
#####################################################################
##+ 3. Find $\alpha$ via Maximum Likelihood and calculate $\sigma^2$
##+ 4. Determine $x_{min}$ via Kolmogorov-Smirnov test
#####################################################################
## Get the ML estimate of the exponent parameter. There are some isolated
## nodes in the provided graph, which means that it is necessary
## to omit these nodes from the analysis using a simple power law.
## One of course could try to fit a model with an explicit atom at zero,
## but that should wait for a better time.
in_alpha_ml, in_alpha_ml_se = mle_alpha( in_deg, min( in_deg )+1 )
out_alpha_ml, out_alpha_ml_se = mle_alpha( out_deg, min( out_deg )+1 )
#####################################################################
in_thresholds = np.unique( in_deg )
out_thresholds = np.unique( out_deg )
## Run the KS threshold selection routine
in_ks_min = np.array( [ ks_dist( in_deg, u ) for u in in_thresholds ] )
out_ks_min = np.array( [ ks_dist( out_deg, u ) for u in out_thresholds ] )
## Select the x_min that brings the KS metric to its minimum on the given
## degree data. Note the first threshold is removed, since it is likely
## to yield very biased estimate.
in_i_min = np.argmin( in_ks_min[1:,0,0] )+1
out_i_min = np.argmin( out_ks_min[1:,0,0] )+1
## Produce a dataset for cCDF plotting.
in_x = np.arange( in_thresholds[ in_i_min ], 2 * np.max( in_deg ) )
out_x = np.arange( out_thresholds[ out_i_min ], 2 * np.max( out_deg ) )
## Get the empirical complementary distribution fuction.
in_deg_ccdf = ccdf( in_deg, in_thresholds[ in_i_min ] )
out_deg_ccdf = ccdf( out_deg, out_thresholds[ out_i_min ] )
## ... and the fitted power law.
in_pwr_ccdf = lambda x : ( x / ( in_thresholds[ in_i_min ] + 0.0 ) ) ** ( 1.0 - in_ks_min[ in_i_min, 1, 0 ] )
out_pwr_ccdf = lambda x : ( x / ( out_thresholds[ out_i_min ] + 0.0 ) ) ** ( 1.0 - out_ks_min[ out_i_min, 1, 0 ] )
print "The MLE of the exponent of the inward and outward degree distribution is %.3f (%.4f) and %.3f (%.4f) respectively\n" % ( in_alpha_ml, in_alpha_ml_se, out_alpha_ml, out_alpha_ml_se )
## Produce the hill plot: the correspondence between the threshold
## and the estimated exponent.
plt.figure( 1, figsize = ( 10, 5 ) )
plt.subplot(121)
plt.title( 'The Hill plot of the degree distribution' )
plt.axhline( y = in_ks_min[ in_i_min, 1, 0 ], linewidth = 1, color = 'r' )
plt.axvline( x = in_thresholds[ in_i_min ], linewidth = 1, color = 'r', linestyle = '--' )
plt.loglog( in_thresholds, in_ks_min[:,1,0], "r<-")
plt.axhline( y = out_ks_min[ out_i_min, 1, 0 ], linewidth = 1, color = 'b' )
plt.axvline( x = out_thresholds[ out_i_min ], linewidth = 1, color = 'b', linestyle = '--' )
plt.loglog( out_thresholds, out_ks_min[:,1,0], "bo-")
plt.ylabel( 'alpha' ) ; plt.xlabel( 'threshold' )
## In fact the KS-metric is the $L^\infty$ norm on the set of distribution
## functions.
plt.subplot(122)
plt.title( 'The KS metric distance' )
plt.axhline( y = in_ks_min[ in_i_min,0, 0 ], linewidth = 1, color = 'r' )
plt.axvline( x = in_thresholds[ in_i_min ], linewidth = 1, color = 'r', linestyle = '--' )
plt.loglog( in_thresholds, in_ks_min[:,0,0], "r<-")
plt.axhline( y = out_ks_min[ out_i_min,0, 0 ], linewidth = 1, color = 'b' )
plt.axvline( x = out_thresholds[ out_i_min ], linewidth = 1, color = 'b', linestyle = '--' )
plt.loglog( out_thresholds, out_ks_min[:,0,0], "bo-")
plt.ylabel( 'max distance' ) ; plt.xlabel( 'threshold' )
plt.show( )
print "OUT-degree: The Kolmogorov-Smirnov metric yielded %.1f as the optimal threshold and %.3f (%.3f) as 'optimal' exponent\n" % ( out_thresholds[ out_i_min ], out_ks_min[ out_i_min, 1, 0 ], out_ks_min[ out_i_min, 1, 1 ] )
print "IN-degree: The Kolmogorov-Smirnov metric yielded %.1f as the optimal threshold and %.3f (%.3f) as 'optimal' exponent\n" % ( in_thresholds[ in_i_min ], in_ks_min[ in_i_min, 1, 0 ], in_ks_min[ in_i_min, 1, 1 ] )
plt.figure( 3, figsize = ( 10, 5 ) )
plt.subplot(121)
plt.title( "Out degree cCDF" )
plt.plot( out_x, out_pwr_ccdf( out_x ), "k-", linewidth = 2 )
plt.plot( out_deg_ccdf[:,0], out_deg_ccdf[:,1], "bo-", linewidth = 2 )
plt.xlabel( "degree" ) ; plt.ylabel( "probability" )
plt.subplot(122)
plt.title( "In degree cCDF" )
plt.plot( in_x, in_pwr_ccdf( in_x ), "k-", linewidth = 2 )
plt.plot( in_deg_ccdf[:,0], in_deg_ccdf[:,1], "r<-", linewidth = 2 )
plt.xlabel( "degree" ) ; plt.ylabel( "probability" )
plt.show( )
#####################################################################
## + 0. Read the graph
import networkx as nx
G = nx.read_edgelist( "./data/web_Stanford.txt", create_using = nx.DiGraph( ) );
node_in_degree = G.in_degree( )
node_out_degree = G.out_degree( )
in_deg = np.array( node_in_degree.values( ), dtype = np.int )
out_deg = np.array( node_out_degree.values( ), dtype = np.int )
#####################################################################
##+ 1. Do they correspond to a power law?
degree_in_freq = np.array( counts( in_deg ) )
degree_out_freq = np.array( counts( out_deg ) )
plt.title( "Node degree frequency" )
plt.xlabel( "degree" ) ; plt.ylabel( "frequency" )
plt.loglog( degree_out_freq[:,0], degree_out_freq[:,1], "bo" )
plt.loglog( degree_in_freq[:,0], degree_in_freq[:,1], "r<" )
plt.show( )
plt.figure( 1, figsize = ( 10, 5 ) )
plt.subplot(121)
plt.title( "Degree cCDF-loglog" )
out_cc = ccdf( out_deg, 0 )
plt.loglog( out_cc[:,0], out_cc[:,1], "bo-", linewidth = 2 )
in_cc = ccdf( in_deg, 0 )
plt.loglog( in_cc[:,0], in_cc[:,1], "r<-", linewidth = 2 )
plt.xlabel( "degree" ) ; plt.ylabel( "probability" )
plt.subplot(122)
plt.title( "Mean excess plot" )
out_me = mean_excess( out_deg )
plt.loglog( out_me[:,0], out_me[:,1], "bo-", linewidth = 2 )
in_me = mean_excess( in_deg )
plt.loglog( in_me[:,0], in_me[:,1], "r<-", linewidth = 2 )
plt.ylabel( "mean excess" ) ; plt.xlabel( "threshold" )
plt.show( )
#####################################################################
##+ 2. Find max and mean values of incoming and outgoing node degrees
print "The degrees range from %d to %d for inward direction and from %d to %d for outward edges" % ( min( in_deg ), max( in_deg ), min( out_deg ), max( out_deg ) ) #, "\n"
print "The average degree over the sample is %.3f (IN) and %.3f (OUT)" % ( np.sum( in_deg ) / ( G.order( ) + 0.0 ), np.sum( out_deg ) / ( G.order( ) + 0.0 ) ) #, "\n"
print "The degree standard deviation is %.3f for the in-degree and %.3f -- out-degree" % ( np.sqrt( np.var( in_deg ) ), np.sqrt( np.var( out_deg ) ) ) #, "\n"
print "The median in- and out-degree is %d and %d respectively" % ( np.median( in_deg ), np.median( out_deg ) ) #, "\n"
#####################################################################
##+ 3. Find $\alpha$ via Maximum Likelihood and calculate $\sigma^2$
##+ 4. Determine $x_{min}$ via Kolmogorov-Smirnov test
in_alpha_ml, in_alpha_ml_se = mle_alpha( in_deg, min( in_deg )+1 )
out_alpha_ml, out_alpha_ml_se = mle_alpha( out_deg, min( out_deg )+1 )
in_thresholds = np.unique( in_deg )
out_thresholds = np.unique( out_deg )
## Run the KS threshold selection routine
in_ks_min = np.array( [ ks_dist( in_deg, u ) for u in in_thresholds ] )
out_ks_min = np.array( [ ks_dist( out_deg, u ) for u in out_thresholds ] )
## Select the x_min that brings the KS metric to its minimum on the given
## degree data.
in_i_min = np.argmin( in_ks_min[1:,0,0] )+1
out_i_min = np.argmin( out_ks_min[1:,0,0] )+1
## Produce a dataset for cCDF plotting.
in_x = np.arange( in_thresholds[ in_i_min ], 2 * np.max( in_deg ) )
out_x = np.arange( out_thresholds[ out_i_min ], 2 * np.max( out_deg ) )
## Get the empirical complementary distribution fuction.
in_deg_ccdf = ccdf( in_deg, in_thresholds[ in_i_min ] )
out_deg_ccdf = ccdf( out_deg, out_thresholds[ out_i_min ] )
## ... and the fitted power law.
in_pwr_ccdf = lambda x : ( x / ( in_thresholds[ in_i_min ] + 0.0 ) ) ** ( 1.0 - in_ks_min[ in_i_min, 1, 0 ] )
out_pwr_ccdf = lambda x : ( x / ( out_thresholds[ out_i_min ] + 0.0 ) ) ** ( 1.0 - out_ks_min[ out_i_min, 1, 0 ] )
print "The MLE of the exponent of the inward and outward degree distribution is %.3f (%.4f) and %.3f (%.4f) respectively\n" % ( in_alpha_ml, in_alpha_ml_se, out_alpha_ml, out_alpha_ml_se )
## Produce the hill plot: the correspondence between the threshold
## and the estimated exponent.
plt.figure( 1, figsize = ( 10, 5 ) )
plt.subplot(121)
plt.title( 'The Hill plot of the degree distribution' )
plt.axhline( y = in_ks_min[ in_i_min, 1, 0 ], linewidth = 1, color = 'r' )
plt.axvline( x = in_thresholds[ in_i_min ], linewidth = 1, color = 'r', linestyle = '--' )
plt.loglog( in_thresholds, in_ks_min[:,1,0], "r<-")
plt.axhline( y = out_ks_min[ out_i_min, 1, 0 ], linewidth = 1, color = 'b' )
plt.axvline( x = out_thresholds[ out_i_min ], linewidth = 1, color = 'b', linestyle = '--' )
plt.loglog( out_thresholds, out_ks_min[:,1,0], "bo-")
plt.ylabel( 'alpha' ) ; plt.xlabel( 'threshold' )
plt.subplot(122)
plt.title( 'The KS metric distance' )
plt.axhline( y = in_ks_min[ in_i_min,0, 0 ], linewidth = 1, color = 'r' )
plt.axvline( x = in_thresholds[ in_i_min ], linewidth = 1, color = 'r', linestyle = '--' )
plt.loglog( in_thresholds, in_ks_min[:,0,0], "r<-")
plt.axhline( y = out_ks_min[ out_i_min,0, 0 ], linewidth = 1, color = 'b' )
plt.axvline( x = out_thresholds[ out_i_min ], linewidth = 1, color = 'b', linestyle = '--' )
plt.loglog( out_thresholds, out_ks_min[:,0,0], "bo-")
plt.ylabel( 'max distance' ) ; plt.ylabel( 'threshold' )
plt.show( )
print "OUT-degree: The Kolmogorov-Smirnov metric yielded %.1f as the optimal threshold and %.3f (%.3f) as 'optimal' exponent\n" % ( out_thresholds[ out_i_min ], out_ks_min[ out_i_min, 1, 0 ], out_ks_min[ out_i_min, 1, 1 ] )
print "IN-degree: The Kolmogorov-Smirnov metric yielded %.1f as the optimal threshold and %.3f (%.3f) as 'optimal' exponent\n" % ( in_thresholds[ in_i_min ], in_ks_min[ in_i_min, 1, 0 ], in_ks_min[ in_i_min, 1, 1 ] )
plt.figure( 3, figsize = ( 10, 5 ) )
plt.subplot(121)
plt.title( "Out degree cCDF" )
plt.plot( out_x, out_pwr_ccdf( out_x ), "k-", linewidth = 2 )
plt.plot( out_deg_ccdf[:,0], out_deg_ccdf[:,1], "bo-", linewidth = 2 )
plt.xlabel( "degree" ) ; plt.ylabel( "probability" )
plt.subplot(122)
plt.title( "In degree cCDF" )
plt.plot( in_x, in_pwr_ccdf( in_x ), "k-", linewidth = 2 )
plt.plot( in_deg_ccdf[:,0], in_deg_ccdf[:,1], "r<-", linewidth = 2 )
plt.xlabel( "degree" ) ; plt.ylabel( "probability" )
plt.show( )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A continuous random variable $X$ is distributed according to the power law (also known as the Pareto distribution) if its probability density function is $$p(x) = \frac{\alpha-1}{u} {\bigg (\frac{x}{u} \bigg )}^{-\alpha} 1_{[u,+\infty)} (x)$$ (the closed-form maximum-likelihood estimator this density implies is written out after this step list)
Step2: Selecting an optimal threshold, beyond which power-law-like tail behaviour is expected, and which adequately balances bias against variance, is very important. As suggested in ... this task is performed well by employing the statistic of the Kolmogorov-Smirnov goodness-of-fit test. The statistic itself is the $L^\infty$ norm of the difference between the hypothesised distribution function and the observed (empirical) CDF; see the formula after this step list.
Step3: These helper functions invert an array and count the number of occurrences of distinct values in an array.
Step4: The mean excess plot is a visual tool that helps determine the tail-type behaviour from the sample data. It is simply the plot of the sample mean of the excesses $x_i - u$ over the observations exceeding a threshold $u$, against $u$ itself (see the sketch after these steps).
Step5: Problems
Step6: <hr /> Task 2.
Step7: The mean excess plot has an unmistakable upward trend throughout the whole set of thresholds. This is strong heuristic evidence for a heavy tail in the node degree distribution.
Step8: Visualize the dependence of $\alpha$ and the KS statistic on the threshold $u$.
Step9: The estimated theoretical and the empirical CDFs are quite well aligned with each other for the chosen threshold. The tail of the hypothesised node degree law appears to decay at a higher rate, but that is due to the severe undersampling that is common in the tails.
Step10: Facebook graph
Step11: The ME plots for both the in- and the out-degrees possess significantly long flat regions, and only at the high end of the thresholds do they "explode" into a singularity. Such behaviour hints at the possibility of an exponential tail in the distributions of both inward and outward vertex degrees.
Step12: Hill plots (the estimated exponent $\hat{\alpha}_u$ against the employed threshold $u$) have a distinct upward-curving trend, which can only be a result of an exponential behaviour in the tail of both the in- and the out-degree distributions.
Step13: Indeed, both complementary CDFs show decay rates faster than the power law.
Step14: The ME plot is a very common case where it is not quite clear whether the mean excesses have an upward trend or not. Neglecting the upper thresholds, both distributions behave consistently with a heavy-tailed distribution. However, the tail of a distribution is by definition its asymptotic behaviour for increasing thresholds. This means that one must look at the unstable estimates of the conditional mean at the right end of the threshold range.
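For reference, a minimal sketch of the estimators discussed in Steps 1 and 2, assuming `data` is a 1-D NumPy array of vertex degrees; the helper names are illustrative and not taken from the notebook itself. The Pareto MLE used is $\hat{\alpha} = 1 + n \big[ \sum_i \ln(x_i/u) \big]^{-1}$, with asymptotic standard error $(\hat{\alpha}-1)/\sqrt{n}$.
import numpy as np

def pareto_mle( data, u ):
    ## Hill/Pareto MLE of the exponent for the tail sample { x : x >= u },
    ## together with its usual asymptotic standard error.
    tail = data[ data >= u ].astype( float )
    n = tail.size
    alpha = 1.0 + n / np.sum( np.log( tail / u ) )
    return alpha, ( alpha - 1.0 ) / np.sqrt( n )

def ks_distance( data, u ):
    ## L-infinity distance between the empirical tail CDF and the fitted
    ## Pareto CDF 1 - ( x / u ) ** ( 1 - alpha ); minimising this distance
    ## over a grid of thresholds selects the "optimal" cut-off u.
    tail = np.sort( data[ data >= u ].astype( float ) )
    alpha, _ = pareto_mle( data, u )
    emp = np.arange( 1, tail.size + 1 ) / float( tail.size )
    fit = 1.0 - ( tail / u ) ** ( 1.0 - alpha )
    return np.max( np.abs( emp - fit ) )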
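And a sketch of the mean excess function behind the ME plots of Step 4, under the same assumptions. An upward-sloping ME plot points towards a heavy (Pareto-like) tail, while a roughly flat one points towards an exponential tail.
def mean_excess( data, u ):
    ## Sample mean of the excesses x - u over the observations exceeding u;
    ## evaluating this on a grid of thresholds u yields the ME plot.
    exceed = data[ data > u ].astype( float )
    return np.mean( exceed - u ) if exceed.size else np.nan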
|
13,298
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('/home/jbourbeau/cr-composition')
print('Added to PYTHONPATH')
import argparse
from collections import defaultdict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn.apionly as sns
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score, learning_curve
import composition as comp
import composition.analysis.plotting as plotting
# Plotting-related
sns.set_palette('muted')
sns.set_color_codes()
color_dict = {'P': 'b', 'He': 'g', 'Fe': 'm', 'O': 'r'}
%matplotlib inline
df_sim, cut_dict_sim = comp.load_dataframe(type_='sim', config='IT73', return_cut_dict=True)
selection_mask = np.array([True] * len(df_sim))
standard_cut_keys = ['lap_reco_success', 'lap_zenith', 'num_hits_1_30', 'IT_signal',
'max_qfrac_1_30', 'lap_containment', 'energy_range_lap']
for key in standard_cut_keys:
selection_mask *= cut_dict_sim[key]
df_sim = df_sim[selection_mask]
feature_list, feature_labels = comp.get_training_features()
print('training features = {}'.format(feature_list))
X_train_sim, X_test_sim, y_train_sim, y_test_sim, le = comp.get_train_test_sets(
df_sim, feature_list, comp_class=True, train_he=True, test_he=True)
print('number training events = ' + str(y_train_sim.shape[0]))
print('number testing events = ' + str(y_test_sim.shape[0]))
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
pipeline = comp.get_pipeline('RF')
# Sequential forward selection: greedily add one feature at a time
# (forward=True, no floating removals) until k_features=6 are kept,
# scoring each candidate feature set by 3-fold cross-validated accuracy.
sfs = SFS(pipeline,
          k_features=6,
          forward=True,
          floating=False,
          scoring='accuracy',
          print_progress=True,
          cv=3,
          n_jobs=10)
sfs = sfs.fit(X_train_sim, y_train_sim)
X_train_sim = sfs.transform(X_train_sim)
X_test_sim = sfs.transform(X_test_sim)
pipeline = comp.get_pipeline('RF')
train_sizes, train_scores, test_scores =\
learning_curve(estimator=pipeline,
X=X_train_sim,
y=y_train_sim,
train_sizes=np.linspace(0.1, 1.0, 10),
cv=10,
n_jobs=20,
verbose=3)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(train_sizes, train_mean,
color='b', linestyle='-',
marker='o', markersize=5,
label='training accuracy')
plt.fill_between(train_sizes,
train_mean + train_std,
train_mean - train_std,
alpha=0.15, color='b')
plt.plot(train_sizes, test_mean,
color='g', linestyle='--',
marker='s', markersize=5,
label='validation accuracy')
plt.fill_between(train_sizes,
test_mean + test_std,
test_mean - test_std,
alpha=0.15, color='g')
plt.grid()
plt.xlabel('Number of training samples')
plt.ylabel('Accuracy')
plt.title('RF Classifier')
plt.legend()
# plt.ylim([0.8, 1.0])
plt.tight_layout()
plt.show()
pipeline.named_steps['classifier']
fig, axarr = plt.subplots(2, 2)
for max_depth, ax in zip([2, 5, 6, 10], axarr.flatten()):
print('max_depth = {}'.format(max_depth))
pipeline = comp.get_pipeline('RF')
params = {'classifier__max_depth': max_depth}
pipeline.set_params(**params)
train_sizes, train_scores, test_scores =\
learning_curve(estimator=pipeline,
X=X_train_sim,
y=y_train_sim,
train_sizes=np.linspace(0.1, 1.0, 10),
cv=10,
n_jobs=20,
verbose=0)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
ax.plot(train_sizes, train_mean,
color='b', linestyle='-',
marker='o', markersize=5,
label='training accuracy')
ax.fill_between(train_sizes,
train_mean + train_std,
train_mean - train_std,
alpha=0.15, color='b')
ax.plot(train_sizes, test_mean,
color='g', linestyle='--',
marker='s', markersize=5,
label='validation accuracy')
ax.fill_between(train_sizes,
test_mean + test_std,
test_mean - test_std,
alpha=0.15, color='g')
ax.grid()
ax.set_xlabel('Number of training samples')
ax.set_ylabel('Accuracy')
ax.set_title('max depth = {}'.format(max_depth))
ax.set_ylim([0.6, 0.9])
ax.legend()
plt.tight_layout()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data preprocessing
Step2: Feature selection
Step3: Feature transform
Step4: Produce 10-fold CV learning curve
|
13,299
|
<ASSISTANT_TASK:>
Python Code:
def encode_shift(s: str):
    # Shifts every lowercase character forward by 5 positions in the alphabet.
    return "".join([chr(((ord(ch) + 5 - ord("a")) % 26) + ord("a")) for ch in s])

def decode_shift(s: str):
    """takes as input string encoded with encode_shift function. Returns decoded string."""
    return "".join([chr(((ord(ch) - 5 - ord("a")) % 26) + ord("a")) for ch in s])
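# Illustrative round-trip check (an added usage example; it assumes
# lowercase ASCII input, as the ord("a")-based arithmetic implies):
assert encode_shift("abc") == "fgh"
assert decode_shift(encode_shift("hello")) == "hello"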
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|