Unnamed: 0
int64 0
15.9k
| cleaned_code
stringlengths 67
124k
⌀ | cleaned_prompt
stringlengths 168
30.3k
⌀ |
|---|---|---|
10,200
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
import matplotlib.pyplot as plt
%matplotlib inline
import input_data
mnist = input_data.read_data_sets('fashion-mnist/data/fashion', one_hot=True)
n_samples = mnist.train.num_examples
class VAE:
    """Variational Autoencoder (Kingma & Welling, 2014) for flattened images.

    The recognition (encoder) network maps input x to the mean and
    log-variance of a diagonal Gaussian over the latent code z; a sample z
    is drawn via the reparameterization trick, and the generator (decoder)
    network maps z back to the mean of a Bernoulli distribution over the
    reconstructed input.
    """

    def __init__(self, network_architecture, transfer_fct=tf.nn.softplus,
                 learning_rate=0.001, batch_size=100):
        self.network_architecture = network_architecture
        self.transfer_fct = transfer_fct
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        # Placeholder for flattened input images, shape (batch, n_input).
        self.x = tf.placeholder(tf.float32, [None, network_architecture["n_input"]])
        self._create_network()
        self._create_loss_optimizer()
        init = tf.global_variables_initializer()
        self.sess = tf.InteractiveSession()
        self.sess.run(init)

    def _create_network(self):
        # Use recognition network to determine mean and (log) variance
        # of the Gaussian distribution in latent space.
        self.z_mean, self.z_log_sigma_sq = self._recognition_network()
        # Draw one sample z from the Gaussian via the reparameterization
        # trick so gradients can flow through the sampling step.
        n_z = self.network_architecture["n_z"]
        eps = tf.random_normal((self.batch_size, n_z), 0, 1, dtype=tf.float32)
        # z = mu + sigma*epsilon
        self.z = tf.add(self.z_mean,
                        tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))
        # Use generator to determine the mean of the Bernoulli
        # distribution of the reconstructed input.
        self.x_reconstr_mean = self._generator_network()

    def _recognition_network(self):
        layer_1 = slim.fully_connected(self.x, self.network_architecture['n_hidden_recog_1'])
        layer_2 = slim.fully_connected(layer_1, self.network_architecture['n_hidden_recog_2'])
        # Linear outputs: the mean is unconstrained, and the log-variance
        # must be allowed to go negative, so no activation here (slim's
        # default ReLU would clamp both at zero).
        z_mean = slim.fully_connected(layer_2, self.network_architecture['n_z'],
                                      activation_fn=None)
        z_log_sigma_sq = slim.fully_connected(layer_2, self.network_architecture['n_z'],
                                              activation_fn=None)
        return z_mean, z_log_sigma_sq

    def _generator_network(self):
        # NOTE(review): the original indexed the n_hidden_recog_* sizes here;
        # the decoder should use its own n_hidden_gener_* sizes (equal in the
        # provided architectures, so the produced graph is the same size).
        layer_1 = slim.fully_connected(self.z, self.network_architecture['n_hidden_gener_1'])
        layer_2 = slim.fully_connected(layer_1, self.network_architecture['n_hidden_gener_2'])
        # Sigmoid keeps the Bernoulli mean inside (0, 1).
        x_reconstr_mean = slim.fully_connected(layer_2, self.network_architecture['n_input'],
                                               activation_fn=tf.nn.sigmoid)
        return x_reconstr_mean

    def _create_loss_optimizer(self):
        # Reconstruction term: Bernoulli negative log-likelihood of x under
        # the decoder output, summed over pixels. The 1e-10 offset avoids
        # evaluating log(0).
        reconstr_loss = -tf.reduce_sum(
            self.x * tf.log(1e-10 + self.x_reconstr_mean)
            + (1 - self.x) * tf.log(1e-10 + 1 - self.x_reconstr_mean), 1)
        # Latent term: closed-form KL divergence between the approximate
        # posterior N(mu, sigma^2) and the standard normal prior.
        latent_loss = -0.5 * tf.reduce_sum(
            1 + self.z_log_sigma_sq
            - tf.square(self.z_mean)
            - tf.exp(self.z_log_sigma_sq), 1)
        self.cost = tf.reduce_mean(reconstr_loss + latent_loss)  # average over batch
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.cost)

    def partial_fit(self, X):
        """Train the model on one mini-batch of input data; return its cost."""
        opt, cost = self.sess.run((self.optimizer, self.cost),
                                  feed_dict={self.x: X})
        return cost

    def transform(self, X):
        """Map data into the latent space.

        Note: this maps to the mean of the distribution; we could
        alternatively sample from the Gaussian.
        """
        return self.sess.run(self.z_mean, feed_dict={self.x: X})

    def generate(self, z_mu=None):
        """Generate data by sampling from the latent space.

        If z_mu is not None, data for this point in latent space is
        generated. Otherwise, z_mu is drawn from the standard normal prior.
        """
        if z_mu is None:
            z_mu = np.random.normal(size=self.network_architecture["n_z"])
        # Note: this maps to the mean of the distribution; we could
        # alternatively sample from the Gaussian.
        return self.sess.run(self.x_reconstr_mean,
                             feed_dict={self.z: z_mu})

    def reconstruct(self, X):
        """Use the VAE to reconstruct the given data."""
        return self.sess.run(self.x_reconstr_mean,
                             feed_dict={self.x: X})
def train(network_architecture, learning_rate=0.001,
          batch_size=1000, training_epochs=10, display_step=5):
    # Train a VAE on the module-level `mnist` dataset and return the fitted
    # model. `avg_cost` accumulates the per-sample cost over the epoch.
    # NOTE(review): assumes module globals `mnist` and `n_samples` exist.
    vae = VAE(network_architecture,
              learning_rate=learning_rate,
              batch_size=batch_size)
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(n_samples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, _ = mnist.train.next_batch(batch_size)
            # Fit training using batch data
            cost = vae.partial_fit(batch_xs)
            # Compute average loss
            avg_cost += cost / n_samples * batch_size
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1),
                  "cost=", "{:.9f}".format(avg_cost))
    return vae
network_architecture = \
dict(n_hidden_recog_1=500, # 1st layer encoder neurons
n_hidden_recog_2=500, # 2nd layer encoder neurons
n_hidden_gener_1=500, # 1st layer decoder neurons
n_hidden_gener_2=500, # 2nd layer decoder neurons
n_input=784, # MNIST data input (img shape: 28*28)
n_z=20) # dimensionality of latent space
# vae = train(network_architecture, training_epochs=75)
x_sample = mnist.test.next_batch(1000)[0]
x_reconstruct = vae.reconstruct(x_sample)
plt.figure(figsize=(8, 12))
for i in range(5):
plt.subplot(5, 2, 2*i + 1)
plt.imshow(x_sample[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
plt.title("Test input")
plt.colorbar()
plt.subplot(5, 2, 2*i + 2)
plt.imshow(x_reconstruct[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
plt.title("Reconstruction")
plt.colorbar()
plt.tight_layout()
# Architecture for the 2-D latent-space model: n_z=2 so the latent codes
# can be scatter-plotted directly (the original line was garbled: `n_z=\``;
# the downstream plot of z_mu[:, 0] vs z_mu[:, 1] requires two dimensions).
network_architecture = \
    dict(n_hidden_recog_1=500,  # 1st layer encoder neurons
         n_hidden_recog_2=500,  # 2nd layer encoder neurons
         n_hidden_gener_1=500,  # 1st layer decoder neurons
         n_hidden_gener_2=500,  # 2nd layer decoder neurons
         n_input=784,  # MNIST data input (img shape: 28*28)
         n_z=2)  # dimensionality of latent space
vae_2d = train(network_architecture, training_epochs=75)
x_sample, y_sample = mnist.test.next_batch(5000)
z_mu = vae_2d.transform(x_sample)
plt.figure(figsize=(8, 6))
plt.scatter(z_mu[:, 0], z_mu[:, 1], c=np.argmax(y_sample, 1))
plt.colorbar()
plt.grid()
nx = ny = 20
x_values = np.linspace(-3, 3, nx)
y_values = np.linspace(-3, 3, ny)
canvas = np.empty((28*ny, 28*nx))
for i, yi in enumerate(x_values):
for j, xi in enumerate(y_values):
z_mu = np.array([[xi, yi]]*vae.batch_size)
x_mean = vae_2d.generate(z_mu)
canvas[(nx-i-1)*28:(nx-i)*28, j*28:(j+1)*28] = x_mean[0].reshape(28, 28)
plt.figure(figsize=(8, 10))
Xi, Yi = np.meshgrid(x_values, y_values)
plt.imshow(canvas, origin="upper", cmap="gray")
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: Task
Step6: Visualize reconstruction quality
Step7: Illustrating latent space
Step8: An other way of getting insights into the latent space is to use the generator network to plot reconstrunctions at the positions in the latent space for which they have been generated
|
10,201
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.metrics as metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
california_housing_dataframe = pd.read_csv("https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv", sep=",")
california_housing_dataframe = california_housing_dataframe.reindex(
np.random.permutation(california_housing_dataframe.index))
california_housing_dataframe["median_house_value"] /= 1000.0
california_housing_dataframe
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
    """Trains a linear regression model of one feature.

    Args:
      features: pandas DataFrame of features
      targets: pandas DataFrame of targets
      batch_size: Size of batches to be passed to the model
      shuffle: True or False. Whether to shuffle the data.
      num_epochs: Number of epochs for which data should be repeated.
        None = repeat indefinitely

    Returns:
      Tuple of (features, labels) for next data batch
    """
    # Convert pandas data into a dict of np arrays.
    features = {key: np.array(value) for key, value in dict(features).items()}

    # Construct a dataset, and configure batching/repeating.
    ds = Dataset.from_tensor_slices((features, targets))  # warning: 2GB limit
    ds = ds.batch(batch_size).repeat(num_epochs)

    # Shuffle the data, if specified.
    if shuffle:
        ds = ds.shuffle(buffer_size=10000)

    # Return the next batch of data.
    features, labels = ds.make_one_shot_iterator().get_next()
    return features, labels
def train_model(learning_rate, steps, batch_size, input_feature):
    """Trains a linear regression model.

    Args:
      learning_rate: A `float`, the learning rate.
      steps: A non-zero `int`, the total number of training steps. A training
        step consists of a forward and backward pass using a single batch.
      batch_size: A non-zero `int`, the batch size.
      input_feature: A `string` specifying a column from
        `california_housing_dataframe` to use as input feature.

    Returns:
      A Pandas `DataFrame` containing targets and the corresponding
      predictions done after training the model.
    """
    periods = 10
    steps_per_period = steps / periods

    my_feature = input_feature
    my_feature_data = california_housing_dataframe[[my_feature]].astype('float32')
    my_label = "median_house_value"
    targets = california_housing_dataframe[my_label].astype('float32')

    # Create input functions.
    training_input_fn = lambda: my_input_fn(my_feature_data, targets, batch_size=batch_size)
    predict_training_input_fn = lambda: my_input_fn(my_feature_data, targets, num_epochs=1, shuffle=False)

    # Create feature columns.
    feature_columns = [tf.feature_column.numeric_column(my_feature)]

    # Create a linear regressor object.
    my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    linear_regressor = tf.estimator.LinearRegressor(
        feature_columns=feature_columns,
        optimizer=my_optimizer
    )

    # Set up to plot the state of our model's line each period.
    plt.figure(figsize=(15, 6))
    plt.subplot(1, 2, 1)
    plt.title("Learned Line by Period")
    plt.ylabel(my_label)
    plt.xlabel(my_feature)
    sample = california_housing_dataframe.sample(n=300)
    plt.scatter(sample[my_feature], sample[my_label])
    colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]

    # Train the model, but do so inside a loop so that we can periodically
    # assess loss metrics.
    print("Training model...")
    print("RMSE (on training data):")
    root_mean_squared_errors = []
    for period in range(0, periods):
        # Train the model, starting from the prior state.
        linear_regressor.train(
            input_fn=training_input_fn,
            steps=steps_per_period,
        )
        # Take a break and compute predictions.
        predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
        predictions = np.array([item['predictions'][0] for item in predictions])

        # Compute loss.
        root_mean_squared_error = math.sqrt(
            metrics.mean_squared_error(predictions, targets))
        # Occasionally print the current loss.
        print(" period %02d : %0.2f" % (period, root_mean_squared_error))
        # Add the loss metrics from this period to our list.
        root_mean_squared_errors.append(root_mean_squared_error)
        # Finally, track the weights and biases over time.
        # Apply some math to ensure that the data and line are plotted neatly.
        y_extents = np.array([0, sample[my_label].max()])

        weight = linear_regressor.get_variable_value('linear/linear_model/%s/weights' % input_feature)[0]
        bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')

        x_extents = (y_extents - bias) / weight
        x_extents = np.maximum(np.minimum(x_extents,
                                          sample[my_feature].max()),
                               sample[my_feature].min())
        y_extents = weight * x_extents + bias
        plt.plot(x_extents, y_extents, color=colors[period])
    print("Model training finished.")

    # Output a graph of loss metrics over periods.
    plt.subplot(1, 2, 2)
    plt.ylabel('RMSE')
    plt.xlabel('Periods')
    plt.title("Root Mean Squared Error vs. Periods")
    plt.tight_layout()
    plt.plot(root_mean_squared_errors)

    # Create a table with calibration data.
    calibration_data = pd.DataFrame()
    calibration_data["predictions"] = pd.Series(predictions)
    calibration_data["targets"] = pd.Series(targets)
    display.display(calibration_data.describe())

    print("Final RMSE (on training data): %0.2f" % root_mean_squared_error)
    return calibration_data
california_housing_dataframe.head()
#
# YOUR CODE HERE
#
california_housing_dataframe["rooms_per_person"] = california_housing_dataframe['total_rooms'] / california_housing_dataframe['population']
california_housing_dataframe.head()
calibration_data = train_model(
learning_rate=0.5,
steps=500,
batch_size=10,
input_feature="rooms_per_person"
)
california_housing_dataframe["rooms_per_person"] = (
california_housing_dataframe["total_rooms"] / california_housing_dataframe["population"])
calibration_data = train_model(
learning_rate=0.05,
steps=500,
batch_size=5,
input_feature="rooms_per_person")
calibration_data.plot(kind='scatter', x='targets', y='predictions')
california_housing_dataframe['rooms_per_person'].plot(kind='hist', bins=50)
plt.figure(figsize=(15, 6))
plt.subplot(1, 2, 1)
plt.scatter(calibration_data["predictions"], calibration_data["targets"])
plt.subplot(1, 2, 2)
_ = california_housing_dataframe["rooms_per_person"].hist()
# YOUR CODE HERE
california_housing_dataframe["rooms_per_person"] = (
california_housing_dataframe["rooms_per_person"]).apply(lambda x: min(x, 5))
_ = california_housing_dataframe["rooms_per_person"].hist()
calibration_data = train_model(
learning_rate=0.05,
steps=500,
batch_size=5,
input_feature="rooms_per_person")
_ = plt.scatter(calibration_data["predictions"], calibration_data["targets"])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Synthetic Features and Outliers
Step4: Next, we'll set up our input function, and define the function for model training
Step5: Task 1
Step6: Solution
Step7: Task 2
Step8: Solution
Step9: The calibration data shows most scatter points aligned to a line. The line is almost vertical, but we'll come back to that later. Right now let's focus on the ones that deviate from the line. We notice that they are relatively few in number.
Step10: Task 3
Step11: Solution
Step12: To verify that clipping worked, let's train again and print the calibration data once more
|
10,202
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
%config InlineBackend.figure_format = 'retina'
from matplotlib import style
style.use('https://raw.githubusercontent.com/JoseGuzman/minibrain/master/minibrain/paper.mplstyle')
from scipy.stats import norm
mu = 0
sigma = 1 # std
rv = norm(loc = mu, scale = sigma)
x = np.linspace(-4,15, 200)
plt.plot(x, rv.pdf(x), label=f'$\mu$ = {mu} $\sigma^2$ = {sigma**2:}')
plt.hist(rv.rvs(10000), density=True, alpha=0.5);
mu = 6
sigma = 2.
rv = norm(loc = mu, scale = sigma)
plt.plot(x, rv.pdf(x), label=f'$\mu$ = {mu} $\sigma^2$ = {sigma**2:}')
plt.hist(rv.rvs(10000), density=True, alpha=0.5);
plt.xlabel('x'); plt.ylabel('PDF')
plt.legend()
np.random.seed(2020)
x = np.random.normal(loc = 0, scale = 1, size = 700)
y = np.random.normal(loc = 6, scale = 2, size = 700)
X = np.vstack((x,y)).T
plt.figure(figsize=(4,4))
plt.scatter(x, y, s=3, color='k')
plt.xticks(np.arange(-4,15, 4))
plt.yticks(np.arange(-4,15, 4))
plt.xlabel('x'), plt.ylabel('y', rotation = 0)
plt.grid('on')
def mycov(x, y):
    """Return the 2x2 sample covariance matrix of vectors x and y.

    Uses the unbiased (n-1) normalization, matching np.cov.
    Assumes x.size == y.size.
    """
    # Sample covariance of two 1-D arrays with Bessel's correction.
    cov = lambda a, b: np.sum((a - a.mean()) * (b - b.mean())) / (len(a) - 1)
    C = np.array([[cov(x, x), cov(x, y)],
                  [cov(y, x), cov(y, y)]])
    return C
mycov(x,y) #x_var = 0.99, y_var = 3.64 (almost 4)
X.shape # matrix of 500 observations and 2 dimensions (features)
np.matmul(X.T,X)/(len(x)-1) # didn't substract mean!
F= X - np.mean(X, axis=0)
np.matmul(F.T,F)/(len(x)-1) # now mean substracted!
np.cov(x,y) # numpy substracts the mean
np.cov(X.T)
C = np.cov(X.T)
X = np.random.multivariate_normal(mean = [0, 0], cov = C, size = 700)
plt.figure(figsize=(4,4))
plt.scatter(X[:,0], X[:,1], s=3, color='k')
plt.xticks(np.arange(-4,15, 4))
plt.yticks(np.arange(-4,15, 4))
plt.xlabel('x'), plt.ylabel('y', rotation = 0)
plt.grid('on')
rho = 0.8
var1, var2 = 2, 6
cov = rho*np.sqrt(var1*var2)
C = np.array([[var1, cov],[cov, var2]])
X = np.random.multivariate_normal(mean = [0, 0], cov = C, size = 700)
plt.figure(figsize=(4,4))
plt.scatter(X[:,0], X[:,1], s=3, color='k')
plt.xticks(np.arange(-4,15, 4))
plt.yticks(np.arange(-4,15, 4))
plt.xlabel('x'), plt.ylabel('y', rotation = 0)
plt.grid('on')
C2 = np.cov(X.T)
C
np.cov(X.T)#C2
A = np.array([[0.6165, 0.6154],[0.617554, 0.7165]])
np.linalg.eig(A)
np.linalg.norm([-0.73454232, -0.67728279])
A = np.array([[2, 3],[2,1]])
v = np.array([1,3])
np.matmul(A,v)
C2
eigVal, eigVect = np.linalg.eig(C2)
eigVal
eigVect
comp0, comp1 = eigVect[:,0]*eigVal[0], eigVect[:,1]*eigVal[1]
comp1
plt.figure(figsize=(4,4))
plt.scatter(X[:,0], X[:,1], s=3, color='gray', alpha=0.4)
plt.arrow(x=0, y=0, dx =comp0[0], dy =comp0[1], lw=2, color='g')
plt.arrow(x=0, y=0, dx =comp1[0], dy =comp1[1], lw=2, color='r')
mysum = comp0 + comp1
plt.arrow(x=0, y=0, dx = mysum[0], dy = mysum[1], lw = 2, color='b')
#plt.plot([0, e*v[0]],[0, e*v[1]], 'r-', lw=2)
plt.xticks(np.arange(-4,9, 4))
plt.yticks(np.arange(-4,9, 4))
plt.xlabel('x'), plt.ylabel('y', rotation =0)
plt.grid('on')
np.std(X[:,1])**2
eVe[0]*eVa.T[0]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Gaussian Distribution
Step3: The variance
Step4: The covariance is always measured between two dimensions. If we have datasets with more than two dimensions
Step5: The covariance matrix can be computed from a data set with zero mean with
Step6: The covariance is ultimately with the correlation coefficient. The correlation coefficient is the normlized
Step7: Linear transformation
Step8: Plot
|
10,203
|
<ASSISTANT_TASK:>
Python Code:
!pip install --upgrade pymongo
from pprint import pprint as pp
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
matplotlib.style.use('ggplot')
import pymongo
from pymongo import MongoClient
client = MongoClient("mongo",27017)
client
client.list_database_names()
import csv
from datetime import datetime
def csv_to_mongo(file, coll):
    """Load a CSV file into Mongo.

    `file` is the CSV file path and `coll` the destination collection
    inside the database. Columns whose header contains 'date' are parsed
    as ISO timestamps; every other cell is coerced to a number when
    possible. The collection is dropped and rebuilt from scratch.
    """
    def to_numeric(d):
        """Best-effort conversion of a cell to int, then float, else str."""
        try:
            return int(d)
        except ValueError:
            try:
                return float(d)
            except ValueError:
                return d

    def to_date(d):
        """To ISO Date. If this cannot be converted, return NULL (None)."""
        try:
            return datetime.strptime(d, "%Y-%m-%dT%H:%M:%S.%f")
        except ValueError:
            return None

    coll.drop()
    with open(file, encoding='utf-8') as f:
        # csv.reader() creates an iterator over the CSV file.
        reader = csv.reader(f, dialect='excel')
        # Read the header row; its names become the document keys.
        columns = next(reader)
        # Columns containing 'date' are parsed as dates, the rest as numbers.
        func_to_cols = list(map(lambda c: to_date if 'date' in c.lower() else to_numeric, columns))
        docs = []
        for row in reader:
            row = [func(e) for (func, e) in zip(func_to_cols, row)]
            docs.append(dict(zip(columns, row)))
        coll.insert_many(docs)
import os
import os.path as path
from urllib.request import urlretrieve
def download_csv_upper_dir(baseurl, filename):
    # Download `baseurl`/`filename` into the parent directory of the current
    # working directory, skipping the download if the file already exists.
    file = path.abspath(path.join(os.getcwd(),os.pardir,filename))
    if not os.path.isfile(file):
        urlretrieve(baseurl + '/' + filename, file)
baseurl = 'http://neuromancer.inf.um.es:8080/es.stackoverflow/'
download_csv_upper_dir(baseurl, 'Posts.csv')
download_csv_upper_dir(baseurl, 'Users.csv')
download_csv_upper_dir(baseurl, 'Tags.csv')
download_csv_upper_dir(baseurl, 'Comments.csv')
download_csv_upper_dir(baseurl, 'Votes.csv')
db = client.stackoverflow
db = client['stackoverflow']
db
posts = db.posts
posts
csv_to_mongo('../Posts.csv',db.posts)
csv_to_mongo('../Users.csv',db.users)
csv_to_mongo('../Votes.csv',db.votes)
csv_to_mongo('../Comments.csv',db.comments)
csv_to_mongo('../Tags.csv',db.tags)
posts.count_documents({})
post = posts.find_one()
post
users = db.users
pp(users.find_one())
print (type(post['_id']))
post['_id']
#posts.save(post)
result = posts.replace_one({"_id": post['_id']}, post)
result.modified_count
post = posts.find_one()
pp(post)
for k,v in post.items():
print("%s: %s" % (k,v))
respuestas = posts.find({"PostTypeId": 2})
respuestas.count()
posts.find({"PostTypeId": 2}).explain()
respuestas = posts.find({"PostTypeId": 2}).limit(10)
respuestas
list(respuestas)
respuestas = posts.find({"PostTypeId": 2}).limit(30)
df = pd.DataFrame(respuestas)
df['Id'].plot()
respuestas = posts.find({ '$and' : [ {"PostTypeId": 2} ,
{"Id" : {'$gte' : 100}} ]}).limit(10)
list(respuestas)
from bson.code import Code
map = Code(
'''
function () {
emit(this.OwnerUserId, 1);
}
''')
reduce = Code(
'''
function (key, values)
{
return Array.sum(values);
}
''')
results = db.posts.map_reduce(map, reduce, "myresults")
db.list_collection_names()
list(results.find())
results = db.posts.map_reduce(map, reduce, "myresults", query={"Score": {'$gt' : 20}})
list(results.find())
db.users.find_one({'Id':20})
db.posts.distinct('Score')
respuestas = db['posts'].aggregate( [ {'$project' : { 'Id' : True }}, {'$limit': 20} ])
list(respuestas)
respuestas = posts.aggregate( [
{'$match': { 'Score' : {'$gte': 40}}},
{'$lookup': {
'from': "users",
'localField': "OwnerUserId",
'foreignField': "Id",
'as': "owner"}
}
])
list(respuestas)
respuestas = db.posts.aggregate( [
{'$match': { 'Score' : {'$gte': 40}}},
{'$lookup': {
'from': "users",
'localField': "OwnerUserId",
'foreignField': "Id",
'as': "owner"}
},
{ '$project' :
{
'Id' : True,
'Score' : True,
'username' : {'$arrayElemAt' : ['$owner.DisplayName', 0]},
'owner.DisplayName' : True
}}
])
list(respuestas)
respuestas = db.posts.aggregate( [
{'$match': { 'Score' : {'$gte': 40}}},
{'$lookup': {
'from': "users",
'localField': "OwnerUserId",
'foreignField': "Id",
'as': "owner"}
},
{ '$unwind': '$owner'},
{ '$project' :
{
'Id' : True,
'Score': True,
'username': '$owner.DisplayName'
}
}
])
list(respuestas)
posts.create_index([('Id', pymongo.HASHED)])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Usaremos la librería pymongo para python. La cargamos a continuación.
Step2: La conexión se inicia con MongoClient en el host descrito en el fichero docker-compose.yml (mongo).
Step5: Format
Step6: Las bases de datos se crean conforme se nombran. Se puede utilizar la notación punto o la de diccionario. Las colecciones también.
Step7: Las bases de datos están compuestas por un conjunto de colecciones. Cada colección aglutina a un conjunto de objetos (documentos) del mismo tipo, aunque como vimos en teoría, cada documento puede tener un conjunto de atributos diferente.
Step8: Importación de los ficheros CSV. Por ahora creamos una colección diferente para cada uno. Después estudiaremos cómo poder optimizar el acceso usando agregación.
Step9: El API de colección de MongoDB
Step10: Utilizo la librería pp para imprimir los objetos grandes de una manera amigable.
Step11: A cada objeto se le asigna una clave implícita con nombre "_id" (si el objeto no lo incluye).
Step12: La siguiente sintaxis está descatalogada en las nuevas versiones, pero era más conveniente
Step13: Ahora hay que hacerlo así (el resultado debe ser 1 documento modificado)
Step14: Además de find_one(), la función principal de búsqueda es find(). Esta función ofrece un conjunto muy ámplio de opciones para búsqueda, que estudiaremos a continuación.
Step15: También existe explain(), al estilo de SQL.
Step16: También se puede limitar la búsqueda.
Step17: La respuesta no es un conjunto de elementos, sino un cursor que puede ir recorriéndose.
Step18: También se puede importar en un dataframe de pandas
Step19: La función find() tiene un gran número de posibilidades para especificar la búsqueda. Se pueden utilizar cualificadores complejos como
Step20: Map-Reduce
Step21: También hay operaciones específicas de la coleción, como count(), groupby() y distinct()
Step22: Framework de Agregación
Step23: Lookup!
Step24: El $lookup genera un array con todos los resultados. El operador $arrayElementAt accede al primer elemento.
Step25: $unwind también puede usarse. "Desdobla" cada fila por cada elemento del array. En este caso, como sabemos que el array sólo contiene un elemento, sólo habrá una fila por fila original, pero sin el array. Finalmente se puede proyectar el campo que se quiera.
Step26: Se pueden crear más índices, de tipos ASCENDING, DESCENDING, HASHED, y otros geoespaciales. https
|
10,204
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
big_df = pd.read_csv("UCI_Credit_Card.csv")
big_df.head()
len(big_df)
len(big_df.dropna())
df = big_df.drop(labels = ['ID'], axis = 1)
labels = df['default.payment.next.month']
df.drop('default.payment.next.month', axis = 1, inplace = True)
num_samples = 25000
train_x, train_y = df[0:num_samples], labels[0:num_samples]
test_x, test_y = df[num_samples:], labels[num_samples:]
test_x.head()
train_y.head()
class bin_transformer(object):
    """Binarize numeric columns by thresholding at fitted quantile cut points.

    The quantiles are computed once from the DataFrame given to the
    constructor and reused to transform any DataFrame with the same columns.
    """

    def __init__(self, df, num_quantiles=2):
        # Interior quantiles only, e.g. num_quantiles=4 -> 0.25, 0.5, 0.75.
        self.quantiles = df.quantile(
            np.linspace(1. / num_quantiles, 1. - 1. / num_quantiles, num_quantiles - 1))

    def transform(self, df):
        """Return (binarized DataFrame, dict of (source column, predicate)).

        Each output column '<col><q>' holds df[col] >= the q-th fitted
        quantile; the dict maps the same name to the source column name and
        a reusable predicate implementing that comparison.
        """
        new = pd.DataFrame()
        fns = {}
        for col_name in df.axes[1]:
            for ix, q in self.quantiles.iterrows():
                quart = q[col_name]
                new[col_name + str(ix)] = (df[col_name] >= quart)
                # Bind col_name/quart as lambda defaults: a plain closure
                # would capture the loop variables late, so every stored
                # predicate would use the *last* column and threshold.
                fns[col_name + str(ix)] = (
                    col_name, lambda x, c=col_name, t=quart: x[c] >= t)
        return new, fns
transformer = bin_transformer(df,5)
train_x_t, tr_fns = transformer.transform(train_x)
test_x_t, test_fns = transformer.transform(test_x)
train_x_t.head()
tr_fns
def bdd_cross_entropy(pred, label):
    """Bounded cross-entropy of predictions against labels.

    A tiny epsilon keeps log() finite when a prediction is exactly zero.
    """
    eps = 10 ** (-20)
    return -np.mean(label * np.log(pred + eps))
def MSE(pred, label):
    """Mean squared error between predictions and labels."""
    err = pred - label
    return np.mean(err * err)
def acc(pred, label):
    """Fraction of thresholded predictions (>= 0.5) matching binary labels."""
    predicted_positive = pred >= 0.5
    actual_positive = label == 1
    return np.mean(predicted_positive == actual_positive)
def find_split(x, y, loss, verbose=False):
    """Greedily pick the boolean column of x whose split most reduces loss on y.

    Each candidate column partitions y into the rows where the column is
    True and where it is False; each side is predicted by its mean and the
    weighted loss of the two sides is compared against predicting the
    global mean. Returns (best column name or None, best loss).
    """
    n = len(x)
    baseline = loss(np.mean(y), y)
    best_col, best_loss = None, baseline
    for col in x.axes[1]:
        selector = x[col]
        n_true = np.sum(selector)
        n_false = n - n_true
        mean_true = np.mean(y[selector])
        mean_false = np.mean(y[~selector])
        split_loss = (n_true * loss(mean_true, y[selector])
                      + n_false * loss(mean_false, y[~selector])) / n
        if verbose:
            print("Column {0} split has improved loss {1}".format(col, baseline - split_loss))
        if split_loss < best_loss:
            best_loss = split_loss
            best_col = col
    return best_col, best_loss
find_split(train_x_t, train_y, MSE, verbose = True)
find_split(train_x_t, train_y, bdd_cross_entropy, verbose = 0)
find_split(train_x_t, train_y, acc, verbose = 0)
np.mean(train_y[train_x_t['PAY_00.8']])
np.mean(train_y[~train_x_t['PAY_00.8']])
np.mean(train_y[train_x_t['AGE0.2']])
np.mean(train_y[~train_x_t['AGE0.2']])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us load the credit card dataset and extract a small dataframe of numerical features to test on.
Step2: Now let us write our transformation function.
Step3: Now let us build some simple loss functions for 1d labels.
Step4: Now let us define the find split function.
|
10,205
|
<ASSISTANT_TASK:>
Python Code:
from threeML import *
%matplotlib inline
import warnings
warnings.simplefilter('ignore')
# create the simulated observation
energies = np.logspace(1,4,151)
low_edge = energies[:-1]
high_edge = energies[1:]
# get a BPL source function
source_function = Broken_powerlaw(K=2,xb=300,piv=300, alpha=0., beta=-3.)
# power law background function
background_function = Powerlaw(K=.5,index=-1.5, piv=100.) + Gaussian(F=50,mu=511,sigma=20)
spectrum_generator = SpectrumLike.from_function('fake',
source_function=source_function,
background_function=background_function,
energy_min=low_edge,
energy_max=high_edge)
spectrum_generator.view_count_spectrum()
# instance our source spectrum
bpl = Broken_powerlaw(piv=300,xb=500)
# instance a point source
ra, dec = 0,0
ps_src = PointSource('source',ra,dec,spectral_shape=bpl)
# instance the likelihood model
src_model = Model(ps_src)
# pass everything to a joint likelihood object
jl_profile = JointLikelihood(src_model,DataList(spectrum_generator))
# fit the model
_ = jl_profile.fit()
# plot the fit in count space
_ = spectrum_generator.display_model(step=False)
spectrum_generator.rebin_on_background(1)
spectrum_generator.view_count_spectrum()
_ = jl_profile.fit()
_ = spectrum_generator.display_model(step=False)
# extract the background from the spectrum plugin.
# This works for OGIPLike plugins as well, though we could easily also just read
# in a bakcground PHA
background_plugin = SpectrumLike.from_background('bkg',spectrum_generator)
background_plugin.view_count_spectrum()
# instance the spectrum setting the line's location to 511
bkg_spectrum = Powerlaw(piv=100) + Gaussian(F=50,mu=511)
# setup model parameters
# fix the line's location
bkg_spectrum.mu_2.fix = True
# nice parameter bounds
bkg_spectrum.K_1.bounds = (1E-4, 10)
bkg_spectrum.F_2.bounds = (0., 1000)
bkg_spectrum.sigma_2.bounds = (2,30)
ps_bkg = PointSource('bkg',0,0,spectral_shape=bkg_spectrum)
bkg_model = Model(ps_bkg)
jl_bkg = JointLikelihood(bkg_model,DataList(background_plugin))
_ = jl_bkg.fit()
_ = background_plugin.display_model(step=False, data_color='#1A68F0', model_color='#FF9700')
modeled_background_plugin = SpectrumLike('full',
# here we use the original observation
observation=spectrum_generator.observed_spectrum,
# we pass the background plugin as the background!
background=background_plugin)
modeled_background_plugin.view_count_spectrum()
modeled_background_plugin.nuisance_parameters
# instance the source model... the background plugin has it's model already specified
bpl = Broken_powerlaw(piv=300,xb=500)
bpl.K.bounds = (1E-5,1E1)
bpl.xb.bounds = (1E1,1E4)
ps_src = PointSource('source',0,0,bpl)
src_model = Model(ps_src)
jl_src = JointLikelihood(src_model,DataList(modeled_background_plugin))
_ = jl_src.fit()
# over plot the joint background and source fits
fig = modeled_background_plugin.display_model(step=False)
_ = background_plugin.display_model(data_color='#1A68F0', model_color='#FF9700',model_subplot=fig.axes,step=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we will create an observation where we have a simulated broken power law source spectrum along with an observed background spectrum. The background is a powerl law continuum with a Gaussian line.
Step2: Using a profile likelihood
Step3: Our fit recovers the simulated parameters. However, we should have binned the spectrum up such that there is at least one background count per spectral bin for the profile to be valid.
Step4: Modeling the background
Step5: This constructs a new plugin with only the observed background so that we can first model it.
Step6: We now construct our background model and fit it to the data. Let's assume we know that the line occurs at 511 keV, but we are unsure of its strength an width. We do not need to bin the data up because we are using a simple Poisson likelihood which is valid even when we have zero counts Cash (1979).
Step7: We now have a model and estimate for the background which we can use when fitting with the source spectrum. We now create a new plugin with just the total observation and pass our background plugin as the background argument.
Step8: When we look at out count spectrum now, we will see the predicted background, rather than the measured one
Step9: Now we simply fit the spectrum as we did in the profiled case. The background plugin's parameters are stored in our new plugin as nuissance parameters
Step10: and the fitting engine will use them in the fit. The parameters will still be connected to the background plugin and its model and thus we can free/fix them there as well as set priors on them.
|
10,206
|
<ASSISTANT_TASK:>
Python Code:
#|export
import tensorboard
from torch.utils.tensorboard import SummaryWriter
from fastai.callback.fp16 import ModelToHalf
from fastai.callback.hook import hook_output
#|export
class TensorBoardBaseCallback(Callback):
    "Base class for tensorboard callbacks"
    # NOTE: the docstring must be the class's first statement to populate
    # __doc__; in the original it sat after `order` and was a no-op string.
    # Run after the Recorder so losses/metrics are already computed when we fire.
    order = Recorder.order+1
    def __init__(self): self.run_projector = False
    def after_pred(self):
        # While a projector pass is active, accumulate features from the hooked layer.
        if self.run_projector: self.feat = _add_projector_features(self.learn, self.h, self.feat)
    def after_validate(self):
        if not self.run_projector: return
        # One projector export per validation: detach the hook, then flush embeddings.
        self.run_projector = False
        self._remove()
        _write_projector_embedding(self.learn, self.writer, self.feat)
    def after_fit(self):
        if self.run: self.writer.close()
    def _setup_projector(self):
        self.run_projector = True
        # Hook a user-supplied `layer` if given, else the model's pooling head.
        self.h = hook_output(self.learn.model[1][1] if not self.layer else self.layer)
        self.feat = {}
    def _setup_writer(self): self.writer = SummaryWriter(log_dir=self.log_dir)
    def __del__(self): self._remove()
    def _remove(self):
        # Detach the forward hook, if one was ever registered.
        if getattr(self, 'h', None): self.h.remove()
#|export
class TensorBoardCallback(TensorBoardBaseCallback):
    "Saves model topology, losses & metrics for tensorboard and tensorboard projector during training"
    def __init__(self, log_dir=None, trace_model=True, log_preds=True, n_preds=9, projector=False, layer=None):
        super().__init__()
        store_attr()
    def before_fit(self):
        # Only log from the main process, and never during lr_find or get_preds.
        self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds") and rank_distrib()==0
        if not self.run: return
        self._setup_writer()
        if self.trace_model:
            # Tracing a half-precision model fails, so refuse early with a clear message.
            if hasattr(self.learn, 'mixed_precision'):
                raise Exception("Can't trace model in mixed precision, pass `trace_model=False` or don't use FP16.")
            b = self.dls.one_batch()
            self.learn._split(b)
            self.writer.add_graph(self.model, *self.xb)
    def after_batch(self):
        # Per-batch: smoothed training loss plus every optimizer hyper-parameter.
        self.writer.add_scalar('train_loss', self.smooth_loss, self.train_iter)
        for i,h in enumerate(self.opt.hypers):
            for k,v in h.items(): self.writer.add_scalar(f'{k}_{i}', v, self.train_iter)
    def after_epoch(self):
        # Per-epoch: validation metrics, then optionally a figure of sample predictions.
        for n,v in zip(self.recorder.metric_names[2:-1], self.recorder.log[2:-1]):
            self.writer.add_scalar(n, v, self.train_iter)
        if self.log_preds:
            b = self.dls.valid.one_batch()
            self.learn.one_batch(0, b)
            # Apply the loss function's activation/decoding (identity if absent).
            preds = getattr(self.loss_func, 'activation', noop)(self.pred)
            out = getattr(self.loss_func, 'decodes', noop)(preds)
            x,y,its,outs = self.dls.valid.show_results(b, out, show=False, max_n=self.n_preds)
            tensorboard_log(x, y, its, outs, self.writer, self.train_iter)
    def before_validate(self):
        if self.projector: self._setup_projector()
#|export
class TensorBoardProjectorCallback(TensorBoardBaseCallback):
    "Extracts and exports image features for tensorboard projector during inference"
    def __init__(self, log_dir=None, layer=None):
        super().__init__()
        store_attr()
    def before_fit(self):
        # Opposite gate to TensorBoardCallback: run only during get_preds
        # (gather_preds present) on the main process.
        self.run = not hasattr(self.learn, 'lr_finder') and hasattr(self, "gather_preds") and rank_distrib()==0
        if not self.run: return
        self._setup_writer()
    def before_validate(self):
        self._setup_projector()
#|export
def _write_projector_embedding(learn, writer, feat):
    "Flush the accumulated projector features to tensorboard's embedding tab."
    vocab = getattr(learn.dl, 'vocab', None)
    # Only labelled dataloaders get human-readable metadata.
    lbls = [vocab[l] for l in feat['lbl']] if vocab else None
    writer.add_embedding(feat['vec'].squeeze(), metadata=lbls,
                         label_img=feat['img'], global_step=learn.train_iter)
#|export
def _add_projector_features(learn, hook, feat):
    "Accumulate the current batch's images, feature vectors and labels into `feat`."
    img = _normalize_for_projector(learn.x)
    # `learn.iter == 0` marks the first batch of the pass: start fresh tensors
    # instead of concatenating onto stale ones. (Direct boolean assignment
    # replaces the original `True if ... else False` anti-idiom.)
    first_batch = learn.iter == 0
    feat['vec'] = hook.stored if first_batch else torch.cat((feat['vec'], hook.stored), 0)
    feat['img'] = img if first_batch else torch.cat((feat['img'], img), 0)
    if getattr(learn.dl, 'vocab', None):
        feat['lbl'] = learn.y if first_batch else torch.cat((feat['lbl'], learn.y), 0)
    return feat
#|export
def _get_embeddings(model, layer):
    "Return the weight matrix of `layer`, defaulting to the model's encoder embedding."
    # `is None` rather than `== None`: equality can be overloaded by
    # tensor-like modules and may not return a plain bool.
    if layer is None: layer = model[0].encoder
    return layer.weight
#|export
@typedispatch
def _normalize_for_projector(x:TensorImage):
    # normalize tensor to be between 0-1, per image, without touching the caller's batch
    img = x.clone()
    sz = img.shape
    # Flatten each image to a row so min/max reduce per-image along dim 1.
    img = img.view(x.size(0), -1)
    img -= img.min(1, keepdim=True)[0]
    # NOTE(review): a constant image has max 0 after the subtraction above,
    # giving a divide-by-zero (NaN/inf) — confirm inputs always vary.
    img /= img.max(1, keepdim=True)[0]
    img = img.view(*sz)
    return img
#|export
from fastai.text.all import LMLearner, TextLearner
#|export
def projector_word_embeddings(learn=None, layer=None, vocab=None, limit=-1, start=0, log_dir=None):
    "Extracts and exports word embeddings from language models embedding layers"
    if not layer:
        # Resolve the embedding layer from the learner type when not given explicitly.
        if isinstance(learn, LMLearner): layer = learn.model[0].encoder
        elif isinstance(learn, TextLearner): layer = learn.model[0].module.encoder
    emb = layer.weight
    # Placeholder thumbnails (uniform grey 8x8) so the projector has a label image per point.
    img = torch.full((len(emb),3,8,8), 0.7)
    vocab = learn.dls.vocab[0] if vocab is None else vocab
    # Suffix tokens with '_' so tensorboard does not collapse duplicates.
    vocab = list(map(lambda x: f'{x}_', vocab))
    writer = SummaryWriter(log_dir=log_dir)
    # limit < 0 means "all remaining": slice to the end (None). The original
    # used -1 here, which silently dropped the last embedding row.
    end = start + limit if limit >= 0 else None
    writer.add_embedding(emb[start:end], metadata=vocab[start:end], label_img=img[start:end])
    writer.close()
#|export
from fastai.vision.data import *
#|export
@typedispatch
def tensorboard_log(x:TensorImage, y: TensorCategory, samples, outs, writer, step):
    # Classification case: render a grid of inputs with targets and predictions,
    # then log the figure to tensorboard.
    fig,axs = get_grid(len(samples), return_fig=True)
    # First pass draws the inputs, second overlays the targets on the same axes.
    for i in range(2):
        axs = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs)]
    # Predictions: green when matching the target, red otherwise.
    axs = [r.show(ctx=c, color='green' if b==r else 'red')
            for b,r,c in zip(samples.itemgot(1),outs.itemgot(0),axs)]
    writer.add_figure('Sample results', fig, step)
#|export
from fastai.vision.core import TensorPoint,TensorBBox
#|export
@typedispatch
def tensorboard_log(x:TensorImage, y: (TensorImageBase, TensorPoint, TensorBBox), samples, outs, writer, step):
    # Image-to-image / point / bbox case: a double-width grid where even axes
    # show the inputs and odd axes show targets then predictions.
    fig,axs = get_grid(len(samples), return_fig=True, double=True)
    for i in range(2):
        axs[::2] = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs[::2])]
    # NOTE(review): the loop variable `x` shadows the `x` argument — it is unused
    # afterwards, but confirm this shadowing is intentional.
    for x in [samples,outs]:
        axs[1::2] = [b.show(ctx=c) for b,c in zip(x.itemgot(0),axs[1::2])]
    writer.add_figure('Sample results', fig, step)
from fastai.vision.all import Resize, RandomSubsetSplitter, aug_transforms, vision_learner, resnet18
path = untar_data(URLs.PETS)
db = DataBlock(blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
item_tfms=Resize(128),
splitter=RandomSubsetSplitter(train_sz=0.1, valid_sz=0.01),
batch_tfms=aug_transforms(size=64),
get_y=using_attr(RegexLabeller(r'(.+)_\d+.*$'), 'name'))
dls = db.dataloaders(path/'images')
learn = vision_learner(dls, resnet18, metrics=accuracy)
learn.unfreeze()
learn.fit_one_cycle(3, cbs=TensorBoardCallback(Path.home()/'tmp'/'runs'/'tb', trace_model=True))
path = untar_data(URLs.PETS)
db = DataBlock(blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
item_tfms=Resize(128),
splitter=RandomSubsetSplitter(train_sz=0.05, valid_sz=0.01),
batch_tfms=aug_transforms(size=64),
get_y=using_attr(RegexLabeller(r'(.+)_\d+.*$'), 'name'))
dls = db.dataloaders(path/'images')
cbs = [TensorBoardCallback(log_dir=Path.home()/'tmp'/'runs'/'vision1', projector=True)]
learn = vision_learner(dls, resnet18, metrics=accuracy)
learn.unfreeze()
learn.fit_one_cycle(3, cbs=cbs)
path = untar_data(URLs.PETS)
db = DataBlock(blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
item_tfms=Resize(128),
splitter=RandomSubsetSplitter(train_sz=0.1, valid_sz=0.01),
batch_tfms=aug_transforms(size=64),
get_y=using_attr(RegexLabeller(r'(.+)_\d+.*$'), 'name'))
dls = db.dataloaders(path/'images')
files = get_image_files(path/'images')
files = files[:256]
dl = learn.dls.test_dl(files, with_labels=True)
learn = vision_learner(dls, resnet18, metrics=accuracy)
layer = learn.model[1][0].ap
cbs = [TensorBoardProjectorCallback(layer=layer, log_dir=Path.home()/'tmp'/'runs'/'vision2')]
_ = learn.get_preds(dl=dl, cbs=cbs)
from fastai.text.all import TextDataLoaders, text_classifier_learner, AWD_LSTM
dls = TextDataLoaders.from_folder(untar_data(URLs.IMDB), valid='test')
learn = text_classifier_learner(dls, AWD_LSTM, drop_mult=0.5, metrics=accuracy)
projector_word_embeddings(learn, limit=1000, log_dir=Path.home()/'tmp'/'runs'/'text')
from transformers import GPT2LMHeadModel, GPT2TokenizerFast
tokenizer = GPT2TokenizerFast.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
layer = model.transformer.wte
vocab_dict = tokenizer.get_vocab()
vocab = [k for k, v in sorted(vocab_dict.items(), key=lambda x: x[1])]
projector_word_embeddings(layer=layer, vocab=vocab, limit=2000, log_dir=Path.home()/'tmp'/'runs'/'transformers')
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased")
layer = model.embeddings.word_embeddings
vocab_dict = tokenizer.get_vocab()
vocab = [k for k, v in sorted(vocab_dict.items(), key=lambda x: x[1])]
projector_word_embeddings(layer=layer, vocab=vocab, limit=2000, start=2000, log_dir=Path.home()/'tmp'/'runs'/'transformers')
#|hide
from nbdev.export import *
notebook2script()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TensorBoardCallback
Step2: Projector
Step3: TensorBoardProjectorCallback
Step4: projector_word_embeddings
Step5: transformers
Step6: BERT
Step7: Validate results in tensorboard
|
10,207
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
df = pd.read_csv('bikes_rent.csv')
df.head()
fig, axes = plt.subplots(nrows=3, ncols=4, figsize=(15, 10))
for idx, feature in enumerate(df.columns[:-1]):
df.plot(feature, "cnt", subplots=True, kind="scatter", ax=axes[idx // 4, idx % 4])
# Код 1.1 (0.5 балла)
# Посчитайте корреляции всех признаков, кроме последнего, с последним с помощью метода corrwith:
df[df.columns[:-1]].corrwith(df[df.columns[-1]])
# Код 1.2 (0.5 балла)
# Посчитайте попарные корреляции между признаками temp, atemp, hum, windspeed(mph), windspeed(ms) и cnt
# с помощью метода corr:
df[['temp', 'atemp', 'hum', 'windspeed(mph)', 'windspeed(ms)', 'cnt']].corr()
# Код 1.3 (0.5 балла)
# Выведите средние признаков
df.describe().iloc[[1]]
from sklearn.preprocessing import scale
from sklearn.utils import shuffle
df_shuffled = shuffle(df, random_state=123)
X = scale(df_shuffled[df_shuffled.columns[:-1]])
y = df_shuffled["cnt"]
from sklearn.linear_model import LinearRegression
# Код 2.1 (1 балл)
# Создайте объект линейного регрессора, обучите его на всех данных и выведите веса модели
# (веса хранятся в переменной coef_ класса регрессора).
# Можно выводить пары (название признака, вес), воспользовавшись функцией zip, встроенной в язык python
# Названия признаков хранятся в переменной df.columns
reg = LinearRegression()
reg.fit(X, y)
for weight, name in zip(reg.coef_, df.columns[:-1]):
print('%s - %s' % (name, weight))
from sklearn.linear_model import Lasso, Ridge
# Код 2.2 (0.5 балла)
# Обучите линейную модель с L1-регуляризацией и выведите веса
las = Lasso()
las.fit(X, y)
for weight, name in zip(las.coef_, df.columns[:-1]):
print('%s - %s' % (name, weight))
# Код 2.3 (0.5 балла)
# Обучите линейную модель с L2-регуляризацией и выведите веса
rid = Ridge()
rid.fit(X, y)
for weight, name in zip(rid.coef_, df.columns[:-1]):
print('%s - %s' % (name, weight))
# Код 3.1 (1 балл)
alphas = np.arange(1, 500, 50)
coefs_lasso = np.zeros((alphas.shape[0], X.shape[1])) # матрица весов размера (число регрессоров) x (число признаков)
coefs_ridge = np.zeros((alphas.shape[0], X.shape[1]))
# Для каждого значения коэффициента из alphas обучите регрессор Lasso
# и запишите веса в соответствующую строку матрицы coefs_lasso (вспомните встроенную в python функцию enumerate),
# а затем обучите Ridge и запишите веса в coefs_ridge.
for idx, alpha in enumerate(alphas):
las = Lasso(alpha=alpha)
las.fit(X, y)
coefs_lasso[idx] = las.coef_
rid = Ridge(alpha=alpha)
rid.fit(X, y)
coefs_ridge[idx] = rid.coef_
plt.figure(figsize=(8, 5))
for coef, feature in zip(coefs_lasso.T, df.columns):
plt.plot(alphas, coef, label=feature, color=np.random.rand(3))
plt.legend(loc="upper right", bbox_to_anchor=(1.4, 0.95))
plt.xlabel("alpha")
plt.ylabel("feature weight")
plt.title("Lasso")
plt.figure(figsize=(8, 5))
for coef, feature in zip(coefs_ridge.T, df.columns):
plt.plot(alphas, coef, label=feature, color=np.random.rand(3))
plt.legend(loc="upper right", bbox_to_anchor=(1.4, 0.95))
plt.xlabel("alpha")
plt.ylabel("feature weight")
plt.title("Ridge")
from sklearn.linear_model import LassoCV
# Код 3.2 (1 балл)
# Обучите регрессор LassoCV на всех параметрах регуляризации из alpha
# Постройте график _усредненного_ по строкам MSE в зависимости от alpha.
# Выведите выбранное alpha, а также пары "признак-коэффициент" для обученного вектора коэффициентов
alphas = np.arange(1, 100, 5)
las = LassoCV(alphas=alphas, cv=3)
las.fit(X, y)
avg = np.average(las.mse_path_, axis = 1)
indexes = np.argsort(las.alphas_)
plt.plot(alphas, avg[indexes])
plt.title('Зависимость MSE от alpha')
plt.xlabel('Alpha')
plt.ylabel('MSE')
print('Minimum MSE: %s\nAlpha: %s' % (np.min(avg), alphas[indexes[np.argmin(avg)]]))
# Код 3.3 (1 балл)
# Выведите значения alpha, соответствующие минимумам MSE на каждом разбиении (то есть по столбцам).
# На трех отдельных графиках визуализируйте столбцы .mse_path_
def build_by_column_index(las, index):
    """Plot MSE vs alpha for a single cross-validation fold of a fitted LassoCV.

    Parameters
    ----------
    las : fitted LassoCV estimator (provides ``alphas_`` and ``mse_path_``)
    index : int
        Column of ``mse_path_`` (i.e. which CV fold) to plot.
    """
    plt.figure(figsize=(8, 5))
    # Use the estimator's own alphas (sorted ascending) rather than the notebook's
    # global `alphas`, so the plot is correct regardless of surrounding state.
    order = np.argsort(las.alphas_)
    fold_mse = las.mse_path_[:, index]
    plt.plot(las.alphas_[order], fold_mse[order])
    plt.title('Зависимость MSE от alpha')
    plt.xlabel('Alpha')
    plt.ylabel('MSE')
    # The best alpha is simply the one aligned with the minimum MSE in
    # `alphas_` order (the original mixed sorted and unsorted index spaces).
    print('Minimum MSE: %s\nAlpha: %s' % (np.min(fold_mse), las.alphas_[np.argmin(fold_mse)]))
build_by_column_index(las, 0)
build_by_column_index(las, 1)
build_by_column_index(las, 2)
las = Lasso(alpha=6)
las.fit(X, y)
for weight, name in sorted(zip(las.coef_, df.columns[:-1])):
print('%s - %s' % (name, weight))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Мы будем работать с датасетом "bikes_rent.csv", в котором по дням записаны календарная информация и погодные условия, характеризующие автоматизированные пункты проката велосипедов, а также число прокатов в этот день. Последнее мы будем предсказывать; таким образом, мы будем решать задачу регрессии.
Step2: Для каждого дня проката известны следующие признаки (как они были указаны в источнике данных)
Step3: Блок 1. Ответьте на вопросы (каждый 0.5 балла)
Step4: В выборке есть признаки, коррелирующие с целевым, а значит, задачу можно решать линейными методами.
Step5: На диагоналях, как и полагается, стоят единицы. Однако в матрице имеются еще две пары сильно коррелирующих столбцов
Step6: Признаки имеют разный масштаб, значит для дальнейшей работы нам лучше нормировать матрицу объекты-признаки.
Step7: Давайте обучим линейную регрессию на наших данных и посмотрим на веса признаков.
Step8: Мы видим, что веса при линейно-зависимых признаках по модулю значительно больше, чем при других признаках.
Step9: Проблема вторая
Step10: Визуализируем динамику весов при увеличении параметра регуляризации
Step11: Ответы на следующие вопросы можно давать, глядя на графики или выводя коэффициенты на печать.
Step12: Итак, мы выбрали некоторый параметр регуляризации. Давайте посмотрим, какие бы мы выбирали alpha, если бы делили выборку только один раз на обучающую и тестовую, то есть рассмотрим траектории MSE, соответствующие отдельным блокам выборки.
|
10,208
|
<ASSISTANT_TASK:>
Python Code:
print("Hello INBO_course!") # python 3(!)
4*5
3**2
(3 + 4)/2, 3 + 4/2,
21//5, 21%5 # floor division, modulo
3 > 4, 3 != 4, 3 == 4
my_variable_name = 'DS_course'
my_variable_name
name, age = 'John', 30
print('The age of {} is {:d}'.format(name, age))
import os
os.listdir()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%%file rehears1.py
#this writes a file in your directory, check it(!)
"A demo module."
def print_it():
    """Dummy function to print the string it"""
    # The triple quotes were stripped in this dump, leaving a bare identifier
    # line that is a SyntaxError; restored here.
    print('it')
import rehears1
rehears1.print_it()
%%file rehears2.py
#this writes a file in your directory, check it(!)
"A demo module."
def print_it():
    """Dummy function to print the string it"""
    # Restored the stripped docstring quoting (bare text was a SyntaxError).
    print('it')
def print_custom(my_input):
    """Dummy function to print the string that"""
    # Restored the stripped docstring quoting (bare text was a SyntaxError).
    print(my_input)
from rehears2 import print_it, print_custom
print_custom('DS_course')
a_float = 5.
type(a_float)
an_integer = 4
type(an_integer)
a_boolean = True
a_boolean
type(a_boolean)
3 > 4 # results in boolean
print(False) # test yourself with FALSE
a_string = "abcde"
a_string
a_string.capitalize(), a_string.upper(), a_string.endswith('f') # Check the other available methods for a_string yourself!
a_string.upper().replace('B', 'A')
a_string + a_string
a_string * 5
a_list = [1, 'a', 3, 4]
a_list
another_list = [1, 'a', 8.2, 4, ['z', 'y']]
another_list
a_list.append(8.2)
a_list
a_list.reverse()
a_list
a_list + ['b', 5]
[el*2 for el in a_list] # list comprehensions...a short for-loop
new_list = []
for element in a_list:
new_list.append(element*2)
print(new_list)
[el for el in dir(list) if not el[0] == '_']
# %load ../notebooks/_solutions/01-python-introduction75.py
sentence = "the quick brown fox jumps over the lazy dog"
# %load ../notebooks/_solutions/01-python-introduction82.py
a_dict = {'a': 1, 'b': 2}
a_dict['c'] = 3
a_dict['a'] = 5
a_dict
a_dict.keys(), a_dict.values(), a_dict.items()
an_empty_dic = dict() # or just {}
an_empty_dic
example_dict = {"timeseries": [2, 5, 3],
"parameter": 21.3,
"scenario": "a"}
example_dict
a_tuple = (1, 2, 4)
collect = a_list, a_dict
type(collect)
serie_of_numbers = 3, 4, 5
# Using tuples on the left-hand side of assignment allows you to extract fields
a, b, c = serie_of_numbers
print(c, b, a)
grades = [88, 72, 93, 94]
from IPython.display import SVG, display
display(SVG("../img/slicing-indexing.svg"))
grades[2]
from IPython.display import SVG, display
display(SVG("../img/slicing-slicing.svg"))
grades[1:3]
a_list = [1, 'a', 8.2, 4]
a_list[0], a_list[2]
a_string = "abcde"
a_string
a_string[2:4]
a_list[-2]
a_list = [0, 1, 2, 3]
a_list[:3]
a_list[::2]
a_dict = {'a': 1, 'b': 2}
a_dict['a']
a_tuple = (1, 2, 4)
a_tuple[1]
a_list
a_list[2] = 10 # element 2 changed -- mutable
a_list
a_tuple[1] = 10 # cfr. a_string -- immutable
a_string[3] = 'q'
for i in [1, 2, 3, 4]:
print(i)
for i in a_list: # anything that is a collection/container can be looped
print(i)
# %load ../notebooks/_solutions/01-python-introduction172.py
for i in a_dict: # items, keys, values
print(i)
for j, key in enumerate(a_dict.keys()):
print(j, key)
b = 7
while b < 10:
b+=1
print(b)
if 'a' in a_dict:
print('a is in!')
if 3 > 4:
print('This is valid')
testvalue = False # 0, 1, None, False, 4 > 3
if testvalue:
print('valid')
else:
raise Exception("Not valid!")
myvalue = 3
if isinstance(myvalue, str):
print('this is a string')
elif isinstance(myvalue, float):
print('this is a float')
elif isinstance(myvalue, list):
print('this is a list')
else:
print('no idea actually')
len(a_list)
a_list.reverse()
a_list
def custom_sum(a, b, verbose=False):
    """custom summation function

    Parameters
    ----------
    a : number
        first number to sum
    b : number
        second number to sum
    verbose: boolean
        require additional information (True) or not (False)

    Returns
    -------
    my_sum : number
        sum of the provided two input elements
    """
    # (Docstring quoting restored: the dump stripped the triple quotes,
    # which made the body a SyntaxError.)
    if verbose:
        print('print a lot of information to the user')
    my_sum = a + b
    return my_sum
custom_sum(2, 3, verbose=False) # [3], '4'
def f1():
    print('this is function 1 speaking...')
def f2():
    print('this is function 2 speaking...')
def function_of_functions(inputfunction):
    # Functions are first-class objects: `inputfunction` is itself a callable.
    return inputfunction()
function_of_functions(f1)
add_two = (lambda x: x + 2)
add_two(10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Python is a calculator
Step2: also logical operators
Step3: Variable assignment
Step4: More information on print format
Step5: <div class="alert alert-warning">
Step6: Loading with defined short name (community agreement)
Step10: Loading functions from any file/module/package
Step11: <div class="alert alert-info">
Step12: integers
Step13: booleans
Step14: <div class="alert alert-warning">
Step15: Containers
Step16: A string is a collection of characters...
Step17: Lists
Step18: <div class="alert alert-info">
Step19: ADVANCED users area
Step20: list comprehensions are basically a short-handed version of a for-loop inside a list. Hence, the previous action is similar to
Step21: Another example checks the methods available for the list data type
Step22: <div class="alert alert-success">
Step23: <div class="alert alert-success">
Step24: <div class="alert alert-warning">
Step25: <div class="alert alert-warning">
Step26: <div class="alert alert-info">
Step27: Accessing container values
Step28: <div class="alert alert-info">
Step29: Select from...till
Step30: Select, counting backward
Step31: <div class="alert alert-warning">
Step32: From the first element until a given index
Step33: Dictionaries
Step34: Tuples
Step35: <div class="alert alert-info">
Step36: Control flows (optional)
Step37: <div class="alert alert-danger">
Step38: <div class="alert alert-success">
Step39: <div class="alert alert-info">
Step40: if statement
Step41: Functions
Step42: <div class="alert alert-danger">
Step44: <div class="alert alert-info">
Step45: Setup of a function
Step46: <div class="alert alert-success">
Step47: Anonymous functions (lambda)
|
10,209
|
<ASSISTANT_TASK:>
Python Code:
@interact(xin=(-5,5,0.1),yin=(-5,5,0.1))
def plotInt(xin,yin):
    # Interactive phase portrait + time series for a simple harmonic oscillator,
    # with initial condition (xin, yin) = (position, velocity).
    xmax = 2
    vmax = 5
    x = linspace(-xmax, xmax, 15) # Define the range the variables span, and the step
    v = linspace(-vmax, vmax, 15)
    X, V = meshgrid(x,v) # Build a grid from them
    # Define the constants
    w = 3
    # Define the equations: x' = v, v' = -w^2 x
    Vp = -w**2*X
    Xp = V
    def resorte(y, t):
        # ODE right-hand side for the spring: y = [position, velocity].
        yp = y[1]
        vp = -w**2*y[0]
        return [yp, vp]
    x0 = [xin, yin]
    t = linspace(0,10,2000)
    sh = integrate.odeint(resorte, x0, t)
    fig = figure(figsize(10,5))
    ax1 = subplot(121) # Left panel: phase space with the vector field
    quiver(X, V, Xp, Vp, angles='xy')
    plot(x, [0]*len(x) ,[0]*len(v), v)
    lfase = plot(sh[:,0],sh[:,1],'.')
    ylim((-vmax,vmax))
    xlim((-xmax,xmax))
    # Polish it: sizes, colors, legends, etc...
    xlabel('$x$', fontsize=16)
    ylabel('$\\dot{x}$',fontsize=16)
    ax1.set_title('Espacio de fases')
    ax2 = subplot(122) # Right panel: position and velocity vs time
    lines = plot(t,sh )
    xlabel('Tiempo [s]')
    ax2.set_title('Espacio de tiempo')
    legend(['Posicion','Velocidad'])
    tight_layout()
    ylim((-xmax, xmax))
@interact(thI=(0,np.pi,0.1),vI=(0,5,0.1))
def plotInt(thI, vI):
    # Interactive phase portrait + time series of an undamped pendulum,
    # with initial condition (thI, vI) = (angle, angular velocity).
    h = linspace(-pi,pi,15) # Define the range the variables span, and the step
    v = linspace(-10,10,15)
    H, V = meshgrid(h,v) # Build a grid from them
    # Define the constants
    g = 10
    l = 1
    # Define the equations: theta' = v, v' = -(g/l) sin(theta)
    Vp = -g/l*sin(H)
    Hp = V
    def pendulo(y, t):
        # ODE right-hand side for the pendulum: y = [angle, angular velocity].
        hp = y[1]
        vp = -g/l*sin(y[0])
        return [hp, vp]
    y0 = [thI, vI]
    t = linspace(0,10,2000)
    sh = integrate.odeint(pendulo, y0, t)
    fig = figure(figsize(10,5))
    ax1 = subplot(121) # Left panel: phase space with the vector field
    quiver(H, V, Hp, Vp, angles='xy')
    plot(h, [0]*len(h) ,[0]*len(v), v)
    # Wrap the angle into [-pi, pi) so rotating solutions stay on the plot.
    sh[:,0] = np.mod(sh[:,0] + np.pi, 2*np.pi) - np.pi
    lfase = plot(sh[:,0], sh[:,1],'.')
    # Polish it: sizes, colors, legends, etc...
    xlabel('$\\theta$', fontsize=16)
    ylabel('$\\dot{\\theta}$', fontsize=16)
    xlim((-pi,pi))
    ylim((-10,10))
    xtick = arange(-1,1.5,0.5)
    x_label = [ r"$-\pi$",
               r"$-\frac{\pi}{2}$", r"$0$",
               r"$+\frac{\pi}{2}$", r"$+\pi$",
              ]
    ax1.set_xticks(xtick*pi)
    ax1.set_xticklabels(x_label, fontsize=20)
    ax1.set_title('Espacio de fases')
    ax2 = subplot(122) # Right panel: angle and angular velocity vs time
    lines = plot(t,sh )
    ylim((-pi, pi))
    ytick = [-pi, 0, pi]
    y_label = [ r"$-\pi$", r"$0$", r"$+\pi$"]
    ax2.set_yticks(ytick)
    ax2.set_yticklabels(y_label, fontsize=20)
    xlabel('Tiempo [s]')
    ax2.set_title('Espacio de tiempo')
    legend(['Posicion','Velocidad'])
    tight_layout()
@interact(th0=(-2*np.pi,2*np.pi,0.1),v0=(-2,2,0.1))
def f(th0 = np.pi/3, v0 = 0):
    # Same pendulum as above but with linear damping `ga`; nullclines are drawn
    # instead of just the axes.
    h = linspace(-pi,pi,15) # Define the range the variables span, and the step
    v = linspace(-10,10,15)
    H, V = meshgrid(h,v) # Build a grid from them
    # Define the constants
    g = 10
    l = 1
    ga = 0.5
    # Define the equations
    Vp = -g/l*sin(H) - ga*V # ONLY THIS CHANGES vs. the undamped case
    Hp = V
    def pendulo(y, t):
        # Damped pendulum right-hand side: y = [angle, angular velocity].
        hp = y[1]
        vp = -g/l*sin(y[0]) - ga* y[1] # AND HERE
        return [hp, vp]
    y0 = [th0, v0]
    t = linspace(0,10,2000)
    sh = integrate.odeint(pendulo, y0, t)
    fig = figure(figsize(10,5))
    ax1 = subplot(121) # Left panel: phase space with the vector field
    quiver(H, V, Hp, Vp, angles='xy')
    plot(h, [0]*len(h) , h , -g/l/ga*sin(h)) # Draw the nullclines
    lfase = plot(sh[:,0],sh[:,1],'.')
    # Polish it: sizes, colors, legends, etc...
    xlabel('$\\theta$', fontsize=16)
    ylabel('$\\dot{\\theta}$',fontsize=16)
    xlim((-pi,pi))
    ylim((-10,10))
    xtick = arange(-1,1.5,0.5)
    x_label = [ r"$-\pi$",
               r"$-\frac{\pi}{2}$", r"$0$",
               r"$+\frac{\pi}{2}$", r"$+\pi$",
              ]
    ax1.set_xticks(xtick*pi)
    ax1.set_xticklabels(x_label, fontsize=20)
    ax1.set_title('Espacio de fases')
    ax2 = subplot(122) # Right panel: angle and angular velocity vs time
    lines = plot(t,sh )
    ylim((-pi, pi))
    ytick = [-pi, 0, pi]
    y_label = [ r"$-\pi$", r"$0$", r"$+\pi$"]
    ax2.set_yticks(ytick)
    ax2.set_yticklabels(y_label, fontsize=20)
    xlabel('Tiempo [s]')
    ax2.set_title('Espacio de tiempo')
    legend(['Posicion','Velocidad'])
    tight_layout()
@interact(x0=(-1,1,0.1),v0=(0,1,0.1))
def f(x0=0,v0=1):
    # Longitudinal oscillations of a mass held by two springs of natural length
    # l0 anchored a distance l apart; (x0, v0) is the initial condition.
    ymax = 2
    vmax = 5
    y = linspace(-ymax, ymax, 15) # Define the range the variables span, and the step
    v = linspace(-vmax, vmax, 15)
    Y, V = meshgrid(y,v) # Build a grid from them
    # Define the constants
    k = 10
    l = 1
    l0 = 1.2
    m = 1
    # Define the equations: y' = v, v' = -(2k/m)(1 - l0/sqrt(y^2+l^2)) y
    Vp = -2*k/m*(1-l0/(sqrt(Y**2+l**2)))*Y
    Yp = V
    def resorte(y, t):
        # Two-spring right-hand side: y = [displacement, velocity].
        yp = y[1]
        vp = -2*k/m*(1-l0/(sqrt(y[0]**2+l**2)))*y[0]
        return [yp, vp]
    y0 = [x0, v0]
    t = linspace(0,10,2000)
    sh = integrate.odeint(resorte, y0, t)
    fig = figure(figsize(10,5))
    ax1 = subplot(121) # Left panel: phase space with the vector field
    quiver(Y, V, Yp, Vp, angles='xy')
    plot(y, [0]*len(y) ,[0]*len(v), v)
    lfase = plot(sh[:,0],sh[:,1],'.')
    ylim((-vmax,vmax))
    xlim((-ymax,ymax))
    # Polish it: sizes, colors, legends, etc...
    xlabel('$y$', fontsize=16)
    ylabel('$\\dot{y}$', fontsize=16)
    ax1.set_title('Espacio de fases')
    ax2 = subplot(122) # Right panel: displacement and velocity vs time
    lines = plot(t,sh )
    xlabel('Tiempo [s]')
    ax2.set_title('Espacio de tiempo')
    legend(['Posicion','Velocidad'])
    tight_layout()
    ylim((-ymax, ymax))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: El Pendulo
Step2: El Pendulo con perdidas
Step3: El resorte Oscilaciones longitudinales.
|
10,210
|
<ASSISTANT_TASK:>
Python Code:
import magma as m
from mantle import DFF
class TFF(m.Circuit):
    # Toggle flip-flop: wire the DFF's inverted output back to its input,
    # so O flips on every rising clock edge.
    io = m.IO(O=m.Out(m.Bit)) + m.ClockIO()
    ff = DFF()
    m.wire( ff(~ff.O), io.O )
print(TFF)
class RippleCounter(m.Generator):
    @staticmethod
    def generate(width: int):
        # Build a `width`-bit asynchronous (ripple) counter: the first TFF is
        # clocked by the external clock and each subsequent TFF by the output
        # of the previous stage.
        class _RippleCounter(m.Circuit):
            name = f'Ripple{width}'
            io = m.IO(O=m.Out(m.Bits[width])) + m.ClockIO()
            tffs = [TFF(name=f"tff{i}") for i in range(width)]
            O = io.CLK
            for i in range(width):
                # Previous stage's output (or CLK for stage 0) drives this clock.
                m.wire(m.clock(O), tffs[i].CLK)
                O = tffs[i].O
                m.wire(O, io.O[i])
        return _RippleCounter
Ripple4 = RippleCounter.generate(4)
print(repr(Ripple4))
import fault
tester = fault.Tester(Ripple4, Ripple4.CLK)
for i in range(1 << 4):
tester.step(2)
tester.print("O=%x\n", Ripple4.O)
tester.compile_and_run(target="verilator", disp_type="realtime")
m.compile("build/ripple", Ripple4, inline=True)
%%bash
cat build/ripple.v
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the last example, we defined a function that created a
Step2: Let's inspect the interface to see the result of appending m.ClockIO().
Step3: Now we'll define a generator for our RippleCounter that accepts a single argument width. A generator in magma is a subclass of m.Generator that defines a static method generate which returns Magma Circuit.
Step4: Now we can generate a 4-bit RippleCounter by calling the generate function directly.
Step5: Let's test our circuit using fault. Magma's Python simulator does not support asynchronous logic, so we'll use verilator.
Step6: We can also look at the generated verilog
|
10,211
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import hyperspy.api as hs
import matplotlib.pyplot as plt
import pyxem as pxm
dp = hs.load('./data/06/mgo_nanoparticles.hdf5')
dp
dp.plot(cmap='magma_r')
sigma_min = 1.7
sigma_max = 13.2
dp_rb= dp.subtract_diffraction_background('difference of gaussians',
min_sigma=sigma_min,
max_sigma=sigma_max)
dp.data
dp_rb.data.compute()
dp_rb.plot(cmap='magma_r')
shifts = dp_rb.center_direct_beam(method='cross_correlate',
half_square_width=15,
return_shifts=True,
radius_start=2,
radius_finish=6)
# this step is likely to be quiet slow
shifts.compute()
dp_rb.align2D(shifts=shifts, crop=False)
scale = 0.03246*2
scale_real = 3.03*2
dp.set_diffraction_calibration(scale)
dp.set_scan_calibration(scale_real)
# see https://github.com/pyxem/pyxem/issues/717 for why have to do this
dp_rb.compute()
dp_rb = pxm.signals.ElectronDiffraction2D(dp_rb)
dp_rb.set_diffraction_calibration(scale)
dp_rb.set_scan_calibration(scale_real)
import hyperspy
hyperspy.__version__
peaks = dp_rb.find_peaks(method='minmax',
interactive=False)
from pyxem.signals.diffraction_vectors import DiffractionVectors
peaks = DiffractionVectors.from_peaks(peaks,center=(36,36),calibration=scale)
diff_map = peaks.get_diffracting_pixels_map()
diff_map.plot()
# Currently a Bug. Need to Fix
peaks.detector_shape= (72,72)
peaks.pixel_calibration=scale
peaks_filtered = peaks.filter_detector_edge(exclude_width=2)
from pyxem.generators.subpixelrefinement_generator import SubpixelrefinementGenerator
from pyxem.signals.diffraction_vectors import DiffractionVectors
refine_gen = SubpixelrefinementGenerator(dp_rb, peaks_filtered)
peaks_refined = DiffractionVectors(refine_gen.center_of_mass_method(square_size=4))
peaks_refined.axes_manager.set_signal_dimension(0)
def replace_nan(data):
    """Return the first element of `data` as an array with NaN entries set to 0.

    Operates on a copy: the original implementation wrote through a view of
    `data[0]` and silently mutated the caller's vectors in place.
    """
    cleaned = np.array(data[0], copy=True)
    cleaned[np.isnan(cleaned)] = 0
    return cleaned
peaks_refined.set_signal_type("diffraction_vectors")
distance_threshold = scale*0.89
min_samples = 10
unique_peaks = peaks_refined.get_unique_vectors(method='DBSCAN',
distance_threshold=distance_threshold,
min_samples=min_samples)
print(np.shape(unique_peaks.data)[0], ' unique vectors were found.')
radius_px = dp_rb.axes_manager.signal_shape[0]/2
reciprocal_radius = radius_px * scale
unique_peaks.plot_diffraction_vectors(
method='DBSCAN',
unique_vectors=unique_peaks,
distance_threshold=distance_threshold,
xlim=reciprocal_radius,
ylim=reciprocal_radius,
min_samples=min_samples,
image_to_plot_on=dp_rb.max(),
image_cmap='magma_r',
plot_label_colors=False)
peaks_refined.plot_diffraction_vectors(
method='DBSCAN',
xlim=reciprocal_radius,
ylim=reciprocal_radius,
unique_vectors=unique_peaks,
distance_threshold=distance_threshold,
min_samples=min_samples,
image_to_plot_on=dp_rb.max(),
image_cmap='gray_r',
plot_label_colors=True,
distance_threshold_all=scale*0.1)
Gs = unique_peaks.filter_magnitude(min_magnitude=10*scale,
max_magnitude=np.inf)
print(np.shape(Gs)[0], ' unique vectors.')
Gs.plot_diffraction_vectors(unique_vectors=Gs,
distance_threshold=distance_threshold,
xlim=reciprocal_radius,
ylim=reciprocal_radius,
min_samples=min_samples,
image_to_plot_on=dp_rb.max(),
image_cmap='magma',
plot_label_colors=False)
np.save('peaks.npy', Gs.data)
Gs = np.load('peaks.npy', allow_pickle=True)
Gs = pxm.signals.DiffractionVectors(Gs)
Gs.axes_manager.set_signal_dimension(0)
from pyxem.generators import VirtualDarkFieldGenerator
radius=scale*2
vdfgen = VirtualDarkFieldGenerator(dp_rb, Gs)
VDFs = vdfgen.get_virtual_dark_field_images(radius=radius)
#%matplotlib notebook
VDFs.plot(cmap='magma', scalebar=False)
from pyxem.utils.segment_utils import separate_watershed
# Watershed-segmentation parameters (sizes in pixels / grain counts).
min_distance = 5.5
min_size = 10
max_size = 1000
max_number_of_grains = 1000
marker_radius = 2
exclude_border = 2
# Tune the parameters on a single VDF image first (index 25), with
# plotting enabled to inspect the intermediate result.
i = 25
sep_i = separate_watershed(
    VDFs.inav[i].data, min_distance=min_distance, min_size=min_size,
    max_size=max_size, max_number_of_grains=max_number_of_grains,
    exclude_border=exclude_border, marker_radius=marker_radius,
    threshold=True, plot_on=True)
# Then segment every VDF image with the same parameter set.
segs = VDFs.get_vdf_segments(min_distance=min_distance,
                             min_size=min_size,
                             max_size = max_size,
                             max_number_of_grains = max_number_of_grains,
                             exclude_border=exclude_border,
                             marker_radius=marker_radius,
                             threshold=True)
print(np.shape(segs.segments)[0],' segments were found.')
segs.segments.plot(cmap='magma_r')
# Normalised cross-correlation between all segment pairs: segments that
# come from the same crystal should correlate strongly.
ncc_vdf = segs.get_ncc_matrix()
ncc_vdf.plot(scalebar=False, cmap='RdBu')
# Segments whose correlation exceeds corr_threshold are summed; a summed
# group is kept only if at least vector_threshold vectors contributed,
# filtering out segments that arise from noise or bad segmentation.
corr_threshold=0.7
vector_threshold=5
segment_threshold=4
corrsegs = segs.correlate_vdf_segments(
    corr_threshold=corr_threshold, vector_threshold=vector_threshold,
    segment_threshold=segment_threshold)
print(np.shape(corrsegs.segments)[0],' correlated segments were found.')
# Simulate a virtual diffraction pattern per summed segment; each
# associated vector becomes a Gaussian spot of width sigma.
sigma = scale*1.5
virtual_sig = corrsegs.get_virtual_electron_diffraction(
    calibration=scale, shape=(int(radius_px*2), int(radius_px*2)), sigma=sigma)
virtual_sig.set_diffraction_calibration(scale)
# Show the correlated segments and their virtual patterns side by side.
hs.plot.plot_images(corrsegs.segments, cmap='magma_r', axes_decor='off',
                    per_row=np.shape(corrsegs.segments)[0],
                    suptitle='', scalebar=False, scalebar_color='white',
                    colorbar=False,
                    padding={'top': 0.95, 'bottom': 0.05,
                             'left': 0.05, 'right':0.78})
hs.plot.plot_images(virtual_sig, cmap='magma_r', axes_decor='off',
                    per_row=np.shape(corrsegs.segments)[0],
                    suptitle='', scalebar=False, scalebar_color='white',
                    colorbar=False,
                    padding={'top': 0.95, 'bottom': 0.05,
                             'left': 0.05, 'right': 0.78})
# --- NMF-based route ---
# Build a mask over the direct beam so it does not dominate the decomposition.
dpm = pxm.signals.Diffraction2D(dp.inav[0,0])
signal_mask = dpm.get_direct_beam_mask(radius=10)
signal_mask.plot()
dp.change_dtype('float32')
# SVD first: its scree plot guides the choice of component count below.
dp.decomposition(algorithm='svd',
                 normalize_poissonian_noise=True,
                 centre=None,
                 signal_mask=signal_mask.data)
dp.plot_decomposition_results()
num_comp=11
ax = dp.plot_explained_variance_ratio(n=200, threshold=num_comp,
                                      hline=True, xaxis_labeling='ordinal',
                                      signal_fmt={'color':'k', 'marker':'.'},
                                      noise_fmt={'color':'gray', 'marker':'.'})
# NMF with the chosen number of components, same beam mask.
dp.decomposition(normalize_poissonian_noise=True,
                 algorithm='nmf',
                 output_dimension=num_comp,
                 signal_mask=signal_mask.data)
dp_nmf = dp.get_decomposition_model(components=np.arange(num_comp))
factors = dp_nmf.get_decomposition_factors()
loadings = dp_nmf.get_decomposition_loadings()
hs.plot.plot_images(loadings, cmap='magma_r', axes_decor='off', per_row=11,
                    suptitle='', scalebar=False, scalebar_color='white', colorbar=False,
                    padding={'top': 0.95, 'bottom': 0.05,
                             'left': 0.05, 'right':0.78})
hs.plot.plot_images(factors, cmap='magma_r', axes_decor='off', per_row=11,
                    suptitle='', scalebar=False, scalebar_color='white', colorbar=False,
                    padding={'top': 0.95, 'bottom': 0.05,
                             'left': 0.05, 'right':0.78})
from hyperspy.signals import Signal2D
# Discard component 0 (background) and component 4 (carbon film), keeping
# only the crystal-related components.
factors = Signal2D(np.delete(factors.data, [0, 4], axis = 0))
loadings = Signal2D(np.delete(loadings.data, [0, 4], axis = 0))
hs.plot.plot_images(factors, cmap='magma_r', axes_decor='off',
                    per_row=9, suptitle='', scalebar=False,
                    scalebar_color='white', colorbar=False,
                    padding={'top': 0.95, 'bottom': 0.05,
                             'left': 0.05, 'right':0.78})
hs.plot.plot_images(loadings, cmap='magma_r', axes_decor='off',
                    per_row=9, suptitle='', scalebar=False,
                    scalebar_color='white', colorbar=False,
                    padding={'top': 0.95, 'bottom': 0.05,
                             'left': 0.05, 'right':0.78})
from pyxem.signals.segments import LearningSegment
learn = LearningSegment(factors=factors, loadings=loadings)
# Cross-correlate components; per the method name, components exceeding
# both factor and loading thresholds are merged by summation.
ncc_nmf = learn.get_ncc_matrix()
ncc_nmf.plot(scalebar=False, cmap='RdBu')
corr_th_factors = 0.45
corr_th_loadings = 0.3
learn_corr = learn.correlate_learning_segments(corr_th_factors=corr_th_factors,
                                               corr_th_loadings=corr_th_loadings)
hs.plot.plot_images(learn_corr.loadings, cmap='magma_r', axes_decor='off',
                    per_row=7, suptitle='', scalebar=False,
                    scalebar_color='white', colorbar=False,
                    padding={'top': 0.95, 'bottom': 0.05,
                             'left': 0.05, 'right':0.78})
hs.plot.plot_images(learn_corr.factors, cmap='magma_r', axes_decor='off',
                    per_row=7, suptitle='', scalebar=False,
                    scalebar_color='white', colorbar=False,
                    padding={'top': 0.95, 'bottom': 0.05,
                             'left': 0.05, 'right':0.78})
from pyxem.utils.segment_utils import separate_watershed
# Watershed parameters for segmenting the correlated NMF loading maps.
min_distance = 10
min_size = 50
max_size = 100000
max_number_of_grains = 100000
marker_radius = 2
exclude_border = 1
threshold = True
# Tune on a single loading map first (index 1), with plotting enabled.
i =1
sep_i = separate_watershed(
    learn_corr.loadings.data[i], min_distance=min_distance,
    min_size=min_size, max_size=max_size,
    max_number_of_grains=max_number_of_grains,
    exclude_border=exclude_border,
    marker_radius=marker_radius, threshold=True, plot_on=True)
# Minimum intensity a loading segment must contain to be kept
# (per the parameter name).
min_intensity_threshold = 10000
learn_corr_seg = learn_corr.separate_learning_segments(
    min_intensity_threshold=min_intensity_threshold,
    min_distance = min_distance, min_size = min_size,
    max_size = max_size,
    max_number_of_grains = max_number_of_grains,
    exclude_border = exclude_border,
    marker_radius = marker_radius, threshold = True)
# Final results of the NMF-based segmentation.
hs.plot.plot_images(learn_corr_seg.loadings,
                    cmap='magma_r', axes_decor='off',
                    per_row=10, suptitle='', scalebar=False,
                    scalebar_color='white', colorbar=False,
                    padding={'top': 0.95, 'bottom': 0.05,
                             'left': 0.05, 'right':0.78})
hs.plot.plot_images(learn_corr_seg.factors,
                    cmap='magma_r', axes_decor='off',
                    per_row=10, suptitle='', scalebar=False,
                    scalebar_color='white', colorbar=False,
                    padding={'top': 0.95, 'bottom': 0.05,
                             'left': 0.05, 'right':0.78})
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load demonstration data
Step2: Plot data to inspect
Step3: Remove the background
Step4: Plot the background subtracted data
Step5: Find the position of the direct beam in the background subtracted data.
Step6: Apply the same shifts to the raw data.
Step7: Set calibrations
Step8: <a id='vdf'></a>
Step9: Visualise the number of diffraction peaks found at each probe position
Step10: Exclude peaks too close to the detector edge for sub-pixel refinement.
Step11: Refine the peak positions using center of mass
Step12: 2.2. Determine Unique Peaks
Step13: Visualise the detected unique peaks by plotting them on the maximum of the signal.
Step14: Visualise both the clusters and the unique peaks obtained after DBSCAN clustering.
Step15: Filter the unique vectors by magnitude in order to exclude the direct beam from the following analysis
Step16: Plot the unique vectors
Step17: Optionally save and load the unique peaks
Step18: 2.3. Virtual Imaging & Segmentation
Step19: Plot the VDF images for inspection
Step20: First find adequate parameters by looking at watershed segmentation of a single VDF image.
Step21: Perform segmentation on all the VDF images
Step22: Plot the segments for inspection
Step23: Calculate normalised cross-correlations between all VDF image segments to identify those that are related to the same crystal.
Step24: If the correlation value exceeds corr_threshold for certain segments, those segments are summed. These segments are discarded if the number of these segments are below vector_threshold, as this number corresponds to the number of detected diffraction peaks associated with the single crystal. The vector_threshold criteria is included to avoid including segment images resulting from noise or incorrect segmentation.
Step25: Simulate virtual diffraction patterns for each summed segment
Step26: Plot the final results from the VDF image-based segmentation
Step27: <a id='nmf'></a>
Step28: Perform single value decomposition (SVD)
Step29: Investigate the scree plot and use it as a guide to determine the number of components
Step30: Perform NMF decomposition with specified number of components
Step31: Plot the NMF results
Step32: Discard the components related to background (#0) and to the carbon film (#4)
Step33: 3.2. Correlate NMF Loading Maps
Step34: Perform correlation and summation of the factors and loadings
Step35: Plot the NMF results after correlation and summation
Step36: First investigate how the parameters influence the segmentation on
Step37: Set a threshold for the minimum intensity value that a loading segment must contain in order to be kept.
Step38: Plot the final results from the NMF-based segmentation
|
10,212
|
<ASSISTANT_TASK:>
Python Code:
import toytree
import toyplot
import numpy as np
# generate a random tree
tre = toytree.rtree.unittree(ntips=10, seed=12345)
# the .treenode attribute of the ToyTree returns its root TreeNode
tre.treenode
# the .idx_dict of a toytree makes TreeNodes accessible by index
tre.idx_dict
print('levelorder:', [node.idx for node in tre.treenode.traverse("levelorder")])
print('preorder: ', [node.idx for node in tre.treenode.traverse("preorder")])
print('postorder: ', [node.idx for node in tre.treenode.traverse("postorder")])
tre.draw(node_labels=True, node_sizes=16);
# traverse the tree and access node attributes (idx, name, leaf/root flags)
for node in tre.treenode.traverse(strategy="levelorder"):
    print("{:<5} {:<5} {:<5} {:<5}".format(
        node.idx, node.name, node.is_leaf(), node.is_root()
        )
    )
# see available features on a ToyTree
tre.features
# set a feature on a few nodes: rename tips 0-2
tre = tre.set_node_values(
    feature="name",
    values={0: 'tip-0', 1: 'tip-1', 2: 'tip-2'},
)
# set a feature on every node: a random integer in [1, 5)
# (np.random.randint's upper bound is exclusive, so values are 1-4)
tre = tre.set_node_values(
    feature="randomint",
    values={idx: np.random.randint(1, 5) for idx in tre.idx_dict},
)
# set a feature to every node for the number of descendants
tre = tre.set_node_values(
    feature="ndesc",
    values={
        idx: len(node.get_leaves())
        for (idx, node) in tre.idx_dict.items()
    }
)
# equivalent: add the same feature by traversing TreeNodes directly
for node in tre.treenode.traverse():
    node.add_feature("ndesc", len(node.get_leaves()))
# ndesc is now an available feature alongside the defaults
tre.features
# it can be accessed from the ToyTree object using .get_node_values()
tre.get_node_values('ndesc', True, True)
# and can be accessed by shortcut using just the feature name to 'node_labels'
tre.draw(node_labels=("ndesc", 1, 0), node_sizes=15);
# traverse the tree and modify nodes (add new 'color' feature):
# leaves get toytree.colors[1], internal nodes get toytree.colors[2]
for node in tre.treenode.traverse():
    if node.is_leaf():
        node.add_feature('color', toytree.colors[1])
    else:
        node.add_feature('color', toytree.colors[2])
# store color list with values for tips and root
colors = tre.get_node_values('color', show_root=1, show_tips=1)
# draw tree with node colors
tre.draw(node_labels=False, node_colors=colors, node_sizes=15);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TreeNode objects are always nested inside of ToyTree objects, and accessed from ToyTrees. When you use .treenode to access a TreeNode from a ToyTree you are actually accessing the top level node of the tree structure, the root. The root TreeNode is connected to every other TreeNode in the tree, and together they describe the tree structure.
Step2: Traversing TreeNodes
Step3: TreeNodes have a large number of attributes and functions available to them which you can explore using tab-completion in a notebook and from the ete3 tutorial. In general, only advanced users will need to access attributes of the TreeNodes directly. For example, it is easier to access node idx and name labels from ToyTrees than from TreeNodes, since ToyTrees will return the values in the order they will be plotted.
Step4: Adding features to TreeNodes
Step5: Let's say we wanted to plot a value on each node of a toytree. You can use the toytree function .set_node_values() to set a value to each node. This takes the feature name, a dictionary mapping values to idx labels, and optionally a default value that is assigned to all other nodes. You can modify existing features or set new features.
Step6: Another potentially useful 'feature' to access includes statistics about the tree. For example, we may want to measure the number of extant descendants of each node on a tree. Such things can be measured directly from TreeNode objects. Below I use get_leaves() as an example. You can see the ete3 docs for more info on TreeNode functions and attributes.
Step7: The set_node_values() function of toytrees operates similarly to the loop below which visits each TreeNode of the tree and adds a feature. The .traverse() function of treenodes is convenient for accessing all nodes.
Step8: Modifying features of TreeNodes
Step9: Here is another example where color values are stored on TreeNodes and then retrieved from the ToyTree, and then used as draw argument to color nodes based on their TreeNode attribute. The nodes are colored based on whether the TreeNode was True or False for the .is_leaf(). We use the default color palette of toytree accessed from toytree.colors.
|
10,213
|
<ASSISTANT_TASK:>
Python Code:
# Show the reference figure of typical long-channel process values.
from IPython.core.display import Image, display
display(Image(url='images/TipicalValuesLongChannel.png'))
%matplotlib inline
import math
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pylab as plb
def matrix(m_length, m_width):
    """Build an m_length-by-m_width wafer grid initialised to all zeros."""
    return np.zeros((m_length, m_width))
# Wafer geometry and nominal device parameters: total material length,
# SCMOS grid cell size, and the base Kp values for n- and p-type devices.
Material_length=963e-6
scmos_process = 3.e-6
wafer_thickness = 0.05e-6
KP_n_base = 120e-6
KP_p_base = 40e-6
# Side length of the wafer matrix in process-grid cells.
size_m = Material_length/scmos_process
# Per-cell Kp increment used to model process variation across the wafer.
delta_KP=0.05e-6
plt.style.use('ggplot')
# Ideal (perfectly homogeneous) Kp_n map: every cell holds the base value.
KP_n_Ideal = matrix(int(size_m),int(size_m))
for i in range(0,int(math.sqrt(KP_n_Ideal.size))):
    for j in range(0,int(math.sqrt(KP_n_Ideal.size))):
        KP_n_Ideal[i][j]= KP_n_base
plt.matshow(KP_n_Ideal)
plt.show()
# Ideal homogeneous Kp_p map.
KP_p_Ideal = matrix(int(size_m),int(size_m))
for i in range(0,int(math.sqrt(KP_p_Ideal.size))):
    for j in range(0,int(math.sqrt(KP_p_Ideal.size))):
        KP_p_Ideal[i][j]= KP_p_base
plt.matshow(KP_p_Ideal)
plt.show()
# Kp_n map with variation growing linearly from the wafer corner (0, 0):
# each cell adds (i + j) * delta_KP to the base value.
Kp_n = matrix(int(size_m),int(size_m))
def corner_kp_n():
    """Fill the global Kp_n with corner-referenced linear variation."""
    for i in range(0,int(math.sqrt(Kp_n.size))):
        for j in range(0,int(math.sqrt(Kp_n.size))):
            Kp_n[i][j]= KP_n_base+(i+j)*delta_KP
corner_kp_n()
plt.matshow(Kp_n)
plt.show()
# Kp_p map with the same corner-referenced linear variation.
Kp_p = matrix(int(size_m),int(size_m))
def corner_kp_p():
    """Fill the global Kp_p with corner-referenced linear variation."""
    for i in range(0,int(math.sqrt(Kp_p.size))):
        for j in range(0,int(math.sqrt(Kp_p.size))):
            Kp_p[i][j]= KP_p_base+(i+j)*delta_KP
corner_kp_p()
plt.matshow(Kp_p)
plt.show()
# Re-initialise Kp_n before building the centre-referenced variant below.
Kp_n = matrix(int(size_m),int(size_m))
def centroid_KP_n(center, i, j):
    """Kp_n for the cell (i, j): the base value plus delta_KP times the
    Chebyshev (chessboard) distance of the cell from `center`."""
    chessboard_dist = max(abs(center - i), abs(center - j))
    return KP_n_base + chessboard_dist * delta_KP
def center_kp_n():
    """Fill the global Kp_n with variation radiating out from the centre cell."""
    for i in range(0,int(math.sqrt(Kp_n.size))):
        for j in range(0,int(math.sqrt(Kp_n.size))):
            Kp_n[i][j]= centroid_KP_n((int(math.sqrt(Kp_n.size))-1)/2,i,j)
center_kp_n()
plt.matshow(Kp_n)
plt.show()
# Re-initialise Kp_p before building its centre-referenced variant below.
Kp_p = matrix(int(size_m),int(size_m))
def centroid_KP_p(center, i, j):
    """Kp_p for the cell (i, j): the base value plus delta_KP times the
    Chebyshev (chessboard) distance of the cell from `center`."""
    chessboard_dist = max(abs(center - i), abs(center - j))
    return KP_p_base + chessboard_dist * delta_KP
def center_kp_p():
    """Fill the global Kp_p with variation radiating out from the centre cell."""
    for i in range(0,int(math.sqrt(Kp_p.size))):
        for j in range(0,int(math.sqrt(Kp_p.size))):
            Kp_p[i][j]= centroid_KP_p((int(math.sqrt(Kp_p.size))-1)/2,i,j)
center_kp_p()
plt.matshow(Kp_p)
plt.show()
# Blank layout canvas: transistor regions are "painted" onto this matrix
# with integer labels.
paint_matrix = matrix(int(size_m),int(size_m))
plt.matshow(paint_matrix)
plt.show()
display(Image(url='images/WLCMOS.png'))
# Transistor sizing in grid cells: base finger width, total W per device
# (TA-TD), active-silicon length and channel length L.
W_base=2
WTA=60
WTB=60
WTC=60
WTD=60
lenght_active_si_transistor = 6
L=2
display(Image(url='images/Transistor.png'))
print("Diseno para ahorro de espacio en el transistor, W_base de 2 unidades")
# Fresh canvas for layout option 1; label 9 marks the shared-source region.
paint_matrix = matrix(int(size_m),int(size_m))
common_source=9
def opcion1():
    """Layout option 1: paint four space-saving transistors (labels 1-4)
    into the four quadrants around the wafer centre."""
    M_center=int((int(math.sqrt(paint_matrix.size))-1)/2)
    # TA (label 1): region above-left of the centre.
    TA = matrix(W_base,int((WTA/W_base+1)*(lenght_active_si_transistor/2)))
    for i in range(M_center-2-TA.shape[0],M_center-2):
        for j in range(M_center-2-TA.shape[1],M_center-2):
            paint_matrix[i][j]=1
    # TB (label 2): above-right of the centre.
    TB = matrix(W_base,int((WTB/W_base+1)*(lenght_active_si_transistor/2)))
    for i in range(M_center-2-TB.shape[0],M_center-2):
        for j in range(M_center+3,TB.shape[1]+M_center+3):
            paint_matrix[i][j]=2
    # TC (label 3): below-left of the centre.
    TC = matrix(W_base,int((WTC/W_base+1)*(lenght_active_si_transistor/2)))
    for i in range(M_center+3,TC.shape[0]+M_center+3):
        for j in range(M_center-2-TC.shape[1],M_center-2):
            paint_matrix[i][j]=3
    # TD (label 4): below-right of the centre.
    TD = matrix(W_base,int((WTD/W_base+1)*(lenght_active_si_transistor/2)))
    for i in range(M_center+3,TD.shape[0]+M_center+3):
        for j in range(M_center+3,TD.shape[1]+M_center+3):
            paint_matrix[i][j]=4
# Paint option 1 and show the resulting layout.
opcion1()
plt.matshow(paint_matrix)
plt.show()
display(Image(url='images/currentmirrorlayout.png'))
print("Diseno propuesto para layout del espejo de corriente")
# Fresh canvas for layout option 2; label 9 marks the shared-source region.
paint_matrix = matrix(int(size_m),int(size_m))
common_source=9
def opcion2():
    """Layout option 2 (common centroid with shared source): paint a central
    shared-source square (label `common_source`) surrounded by interleaved
    branches of two transistors (labels 4 and 5), one branch per side."""
    M_center=int((int(math.sqrt(paint_matrix.size))-1)/2)
    Trasistor_branch_per_side=1
    shape_sides=4
    Transistor_branches=Trasistor_branch_per_side*shape_sides
    # Each transistor's total W is split evenly across its branches.
    branch_W=int(WTA/Transistor_branches)
    # Central shared-source square.
    TSource=matrix(int(2*branch_W+(lenght_active_si_transistor/2)*3),int(2*branch_W+(lenght_active_si_transistor/2)*3))
    for i in range(M_center-int(TSource.shape[0]/2),M_center+int(TSource.shape[0]/2)):
        for j in range(M_center-int(TSource.shape[0]/2),M_center+int(TSource.shape[0]/2)):
            paint_matrix[i][j]=common_source
    # Transistor A (label 4): one branch on each of the four sides.
    TA_1 = matrix(branch_W,int(lenght_active_si_transistor))
    for i in range(M_center-2-TA_1.shape[0],M_center-2):
        for j in range(M_center-TA_1.shape[1]-int(TSource.shape[0]/2),M_center-int(TSource.shape[0]/2)):
            paint_matrix[i][j]=4
    TA_2 = matrix(branch_W,int(lenght_active_si_transistor))
    for i in range(M_center+3,TA_2.shape[0]+M_center+3):
        for j in range(M_center+int(TSource.shape[0]/2)-1,TA_2.shape[1]+M_center+int(TSource.shape[0]/2)-1):
            paint_matrix[i][j]=4
    TA_3 = matrix(branch_W,int(lenght_active_si_transistor))
    for i in range(M_center-TA_3.shape[1]-int(TSource.shape[0]/2),M_center-int(TSource.shape[0]/2)):
        for j in range(M_center+3,TA_3.shape[0]+M_center+3):
            paint_matrix[i][j]=4
    TA_4 = matrix(branch_W,int(lenght_active_si_transistor))
    for i in range(M_center+int(TSource.shape[0]/2)-1,TA_4.shape[1]+M_center+int(TSource.shape[0]/2)-1):
        for j in range(M_center-2-TA_4.shape[0],M_center-2):
            paint_matrix[i][j]=4
    # Transistor B (label 5): four branches interleaved with A's.
    TB_1 = matrix(branch_W,int(lenght_active_si_transistor))
    for i in range(M_center+3,TB_1.shape[0]+M_center+3):
        for j in range(M_center-TB_1.shape[1]-int(TSource.shape[0]/2),M_center-int(TSource.shape[0]/2)):
            paint_matrix[i][j]=5
    TB_2 = matrix(branch_W,int(lenght_active_si_transistor))
    for i in range(M_center-2-TB_2.shape[0],M_center-2):
        for j in range(M_center+int(TSource.shape[0]/2)-1,TB_2.shape[1]+M_center+int(TSource.shape[0]/2)-1):
            paint_matrix[i][j]=5
    TB_3 = matrix(branch_W,int(lenght_active_si_transistor))
    for i in range(M_center-TB_3.shape[1]-int(TSource.shape[0]/2),M_center-int(TSource.shape[0]/2)):
        for j in range(M_center-2-TB_3.shape[0],M_center-2):
            paint_matrix[i][j]=5
    TB_4 = matrix(branch_W,int(lenght_active_si_transistor))
    for i in range(M_center+int(TSource.shape[0]/2)-1,TB_4.shape[1]+M_center+int(TSource.shape[0]/2)-1):
        for j in range(M_center+3,TB_4.shape[0]+M_center+3):
            paint_matrix[i][j]=5
# Paint option 2 and show the resulting layout.
opcion2()
plt.matshow(paint_matrix)
plt.show()
def prom_KP_n_for_transistor(transistor_num,common_source):
    """Average Kp_n over every wafer cell painted with `transistor_num` or
    with the shared-source label `common_source`."""
    side = int(math.sqrt(paint_matrix.size))
    total = 0
    hits = 0
    for i in range(side):
        for j in range(side):
            cell = paint_matrix[i][j]
            # Two independent checks, matching the painting scheme: a cell
            # carries either a transistor label or the shared-source label.
            if cell == transistor_num:
                total += Kp_n[i][j]
                hits += 1
            if cell == common_source:
                total += Kp_n[i][j]
                hits += 1
    return total / hits
def prom_KP_p_for_transistor(transistor_num,common_source):
    """Average Kp_p over every wafer cell painted with `transistor_num` or
    with the shared-source label `common_source`."""
    side = int(math.sqrt(paint_matrix.size))
    total = 0
    hits = 0
    for i in range(side):
        for j in range(side):
            cell = paint_matrix[i][j]
            # Two independent checks, matching the painting scheme: a cell
            # carries either a transistor label or the shared-source label.
            if cell == transistor_num:
                total += Kp_p[i][j]
                hits += 1
            if cell == common_source:
                total += Kp_p[i][j]
                hits += 1
    return total / hits
print("Para la opcion de diseno 1: Transistores ahorro de espacio con centroide: \n")
# Option 1 under corner-referenced Kp variation: rebuild the layout and the
# Kp maps, then report the effective Kp seen by each transistor.
paint_matrix = matrix(int(size_m),int(size_m))
opcion1()
corner_kp_n()
plt.matshow(Kp_n)
plt.show()
plt.matshow(paint_matrix)
plt.show()
Kpn_T1=prom_KP_n_for_transistor(1,common_source)
Kpn_T2=prom_KP_n_for_transistor(2,common_source)
Kpn_T3=prom_KP_n_for_transistor(3,common_source)
Kpn_T4=prom_KP_n_for_transistor(4,common_source)
print("Kp_n Transistor 1: "+str(Kpn_T1))
print("Kp_n Transistor 2: "+str(Kpn_T2))
print("Kp_n Transistor 3: "+str(Kpn_T3))
print("Kp_n Transistor 4: "+str(Kpn_T4))
corner_kp_p()
Kpp_T1=prom_KP_p_for_transistor(1,common_source)
Kpp_T2=prom_KP_p_for_transistor(2,common_source)
Kpp_T3=prom_KP_p_for_transistor(3,common_source)
Kpp_T4=prom_KP_p_for_transistor(4,common_source)
print("Kp_p Transistor 1: "+str(Kpp_T1))
print("Kp_p Transistor 2: "+str(Kpp_T2))
print("Kp_p Transistor 3: "+str(Kpp_T3))
print("Kp_p Transistor 4: "+str(Kpp_T4))
print("Para la opcion de diseno 2: Transistores con source comun en centroide: \n")
# Option 2 under the same corner-referenced variation (device labels 4 and 5).
paint_matrix = matrix(int(size_m),int(size_m))
opcion2()
corner_kp_n()
plt.matshow(Kp_n)
plt.show()
plt.matshow(paint_matrix)
plt.show()
Kpn_T1=prom_KP_n_for_transistor(4,common_source)
Kpn_T2=prom_KP_n_for_transistor(5,common_source)
print("Kp_n Transistor 1: "+str(Kpn_T1))
print("Kp_n Transistor 2: "+str(Kpn_T2))
corner_kp_p()
Kpp_T1=prom_KP_p_for_transistor(4,common_source)
Kpp_T2=prom_KP_p_for_transistor(5,common_source)
print("Kp_p Transistor 1: "+str(Kpp_T1))
print("Kp_p Transistor 2: "+str(Kpp_T2))
print("Para la opcion de diseno 1: Transistores ahorro de espacio con centroide: \n")
# Option 1 under centre-referenced Kp variation.
paint_matrix = matrix(int(size_m),int(size_m))
opcion1()
center_kp_n()
plt.matshow(Kp_n)
plt.show()
plt.matshow(paint_matrix)
plt.show()
Kpn_T1=prom_KP_n_for_transistor(1,common_source)
Kpn_T2=prom_KP_n_for_transistor(2,common_source)
Kpn_T3=prom_KP_n_for_transistor(3,common_source)
Kpn_T4=prom_KP_n_for_transistor(4,common_source)
print("Kp_n Transistor 1: "+str(Kpn_T1))
print("Kp_n Transistor 2: "+str(Kpn_T2))
print("Kp_n Transistor 3: "+str(Kpn_T3))
print("Kp_n Transistor 4: "+str(Kpn_T4))
center_kp_p()
Kpp_T1=prom_KP_p_for_transistor(1,common_source)
Kpp_T2=prom_KP_p_for_transistor(2,common_source)
Kpp_T3=prom_KP_p_for_transistor(3,common_source)
Kpp_T4=prom_KP_p_for_transistor(4,common_source)
print("Kp_p Transistor 1: "+str(Kpp_T1))
print("Kp_p Transistor 2: "+str(Kpp_T2))
print("Kp_p Transistor 3: "+str(Kpp_T3))
print("Kp_p Transistor 4: "+str(Kpp_T4))
print("Para la opcion de diseno 2: Transistores con source comun en centroide: \n")
# Option 2 under centre-referenced variation (device labels 4 and 5).
paint_matrix = matrix(int(size_m),int(size_m))
opcion2()
center_kp_n()
plt.matshow(Kp_n)
plt.show()
plt.matshow(paint_matrix)
plt.show()
Kpn_T1=prom_KP_n_for_transistor(4,common_source)
Kpn_T2=prom_KP_n_for_transistor(5,common_source)
print("Kp_n Transistor 1: "+str(Kpn_T1))
print("Kp_n Transistor 2: "+str(Kpn_T2))
center_kp_p()
Kpp_T1=prom_KP_p_for_transistor(4,common_source)
Kpp_T2=prom_KP_p_for_transistor(5,common_source)
print("Kp_p Transistor 1: "+str(Kpp_T1))
print("Kp_p Transistor 2: "+str(Kpp_T2))
# Reference schematics for the NMOS and PMOS current mirrors.
display(Image(url='images/espejo_n.png'))
print("Espejo NMOS")
display(Image(url='images/espejo_p.png'))
print("Espejo PMOS")
import sys
import fileinput
def modificar_cir_Espejo_NMOS(W,L,Kp_T1,Kp_T2):
    """Rewrite the NMOS current-mirror SPICE deck on disk.

    Builds a netlist containing one ideal mirror (model nmosideal) and one
    mirror whose two devices use the wafer-derived parameters Kp_T1 / Kp_T2,
    then writes it into espejoNmosPythonFile.cir.
    """
    text="* Simulación Circuito Espejo de Corriente con Ncmos, valores reales de Kp_n y Vt"+"\n"+ \
    "* Universidad Nacional de Colombia 2016"+"\n"+ \
    "* CMOS Analógico"+"\n"+ \
    "* Grupo Jorge Garzón, Esteban Iafrancesco A"+"\n"+ \
    "\n"+\
    "VDD VDD 0 DC 10 AC 0"+"\n"+\
    "V2 VR 0 DC 10 AC 0"+"\n"+\
    "VRD RDN VR DC 0 AC 0"+"\n"+\
    "RD RDN DRAIN 1000"+"\n"+\
    "RP VDD GATE 2000"+"\n"+\
    "M1 DRAIN GATE 0 0 nmosideal W="+str(W)+" L="+str(L)+"\n"+\
    "M2 GATE GATE 0 0 nmosideal W="+str(W)+" L="+str(L)+"\n"+\
    "\n"+\
    "VRD2 RDN2 VR DC 0 AC 0"+"\n"+\
    "RD2 RDN2 DRAIN2 1000"+"\n"+\
    "RP2 VDD GATE2 2000"+"\n"+\
    "M3 DRAIN2 GATE2 0 0 nmos1 W="+str(W)+" L="+str(L)+"\n"+\
    "M4 GATE2 GATE2 0 0 nmos2 W="+str(W)+" L="+str(L)+"\n"+\
    "\n"+\
    ".model nmosideal nmos LEVEL=1 Vto=0.8 KP=120u LAMBDA=0.01 U0=650"+"\n"+\
    ".model nmos1 nmos LEVEL=1 Vto=0.8 KP="+str(Kp_T1)+" LAMBDA=0.01 U0=650"+"\n"+\
    ".model nmos2 nmos LEVEL=1 Vto=0.8 KP="+str(Kp_T2)+" LAMBDA=0.01 U0=650"+"\n"+\
    "\n"+\
    ".control"+"\n"+\
    "set color0 =white"+"\n"+\
    "set color1=black"+"\n"+\
    "op"+"\n"+\
    "show all"+"\n"+\
    "dc vdd 0.7 12 0.01"+"\n"+\
    "plot i(vrd) i(vrd2)"+"\n"+\
    ".endc"+"\n"
    # fileinput with inplace=1 redirects stdout into the file: only the
    # generated deck (written when i == 1) ends up in the file; lines that
    # are not echoed back are dropped.
    for i, line in enumerate(fileinput.input('../spice-simulations/espejoNmosPythonFile.cir', inplace=1)):
        if i == 1: sys.stdout.write(text) # replace 'sit' and write
    fileinput.close()
def modificar_cir_Espejo_PMOS(W,L,Kp_T1,Kp_T2):
    """Rewrite the PMOS current-mirror SPICE deck on disk.

    Same structure as the NMOS variant, with negated supplies and PMOS
    models; the non-ideal mirror uses the wafer-derived Kp_T1 / Kp_T2.
    """
    text="* Simulación Circuito Espejo de Corriente con Ncmos, valores reales de Kp_n y Vt"+"\n"+ \
    "* Universidad Nacional de Colombia 2016"+"\n"+ \
    "* CMOS Analógico"+"\n"+ \
    "* Grupo Jorge Garzón, Esteban Iafrancesco A"+"\n"+ \
    "\n"+\
    "VDD VDD 0 DC -10 AC 0"+"\n"+\
    "V2 VR 0 DC -10 AC 0"+"\n"+\
    "VRD RDN VR DC 0 AC 0"+"\n"+\
    "RD RDN DRAIN 1000"+"\n"+\
    "RP VDD GATE 2000"+"\n"+\
    "M1 DRAIN GATE 0 0 pmosideal W="+str(W)+" L="+str(L)+"\n"+\
    "M2 GATE GATE 0 0 pmosideal W="+str(W)+" L="+str(L)+"\n"+\
    "\n"+\
    "VRD2 RDN2 VR DC 0 AC 0"+"\n"+\
    "RD2 RDN2 DRAIN2 1000"+"\n"+\
    "RP2 VDD GATE2 2000"+"\n"+\
    "M3 DRAIN2 GATE2 0 0 pmos1 W="+str(W)+" L="+str(L)+"\n"+\
    "M4 GATE2 GATE2 0 0 pmos2 W="+str(W)+" L="+str(L)+"\n"+\
    "\n"+\
    ".model pmosideal pmos LEVEL=1 Vto=-0.9 KP=40u LAMBDA=0.0125 U0=250"+"\n"+\
    ".model pmos1 pmos LEVEL=1 Vto=-0.9 KP="+str(Kp_T1)+" LAMBDA=0.0125 U0=250"+"\n"+\
    ".model pmos2 pmos LEVEL=1 Vto=-0.9 KP="+str(Kp_T2)+" LAMBDA=0.0125 U0=250"+"\n"+\
    "\n"+\
    ".control"+"\n"+\
    "set color0 =white"+"\n"+\
    "set color1=black"+"\n"+\
    "op"+"\n"+\
    "show all"+"\n"+\
    "dc vdd -0.8 -12 -0.01"+"\n"+\
    "plot i(vrd) i(vrd2)"+"\n"+\
    ".endc"+"\n"
    # Same in-place rewrite mechanism as the NMOS variant: the deck is
    # written at line index 1 and all other original lines are dropped.
    for i, line in enumerate(fileinput.input('../spice-simulations/espejoPmosPythonFile.cir', inplace=1)):
        if i == 1: sys.stdout.write(text) # replace 'sit' and write
    fileinput.close()
# --- NMOS mirror simulations ---
# Option 1 layout, corner-referenced Kp_n; simulation results shown as
# pre-rendered images (the modificar_cir calls are commented out).
paint_matrix = matrix(int(size_m),int(size_m))
opcion1()
corner_kp_n()
plt.matshow(Kp_n)
plt.show()
plt.matshow(paint_matrix)
plt.show()
Kpn_T1=prom_KP_n_for_transistor(1,common_source)
Kpn_T2=prom_KP_n_for_transistor(2,common_source)
Kpn_T3=prom_KP_n_for_transistor(3,common_source)
Kpn_T4=prom_KP_n_for_transistor(4,common_source)
#modificar_cir_Espejo_NMOS(WTA,L,Kpn_T1,Kpn_T2)
display(Image(url='images/corner_TA_TB.png'))
print("VDD vs -Iout. Rojo Espejo ideal, Azul Espejo Con variaciones en KP_n de la oblea. Transistores TA y TB")
#modificar_cir_Espejo_NMOS(WTA,L,Kpn_T1,Kpn_T3)
display(Image(url='images/corner_TA_TC.png'))
print("VDD vs -Iout. Rojo Espejo ideal, Azul Espejo Con variaciones en KP_n de la oblea. Transistores TA y TC")
#modificar_cir_Espejo_NMOS(WTA,L,Kpn_T2,Kpn_T3)
display(Image(url='images/corner_TB_TC.png'))
print("VDD vs -Iout. Rojo Espejo ideal, Azul Espejo Con variaciones en KP_n de la oblea. Transistores TB y TC")
# Option 1 layout, centre-referenced Kp_n.
paint_matrix = matrix(int(size_m),int(size_m))
opcion1()
center_kp_n()
plt.matshow(Kp_n)
plt.show()
plt.matshow(paint_matrix)
plt.show()
Kpn_T1=prom_KP_n_for_transistor(1,common_source)
Kpn_T2=prom_KP_n_for_transistor(2,common_source)
Kpn_T3=prom_KP_n_for_transistor(3,common_source)
Kpn_T4=prom_KP_n_for_transistor(4,common_source)
#modificar_cir_Espejo_NMOS(WTA,L,Kpn_T1,Kpn_T2)
display(Image(url='images/cetroid_Kp_TA_TB_n.png'))
print("VDD vs -Iout. Rojo Espejo ideal, Azul Espejo Con variaciones en KP_n de la oblea. Transistores TA y TB")
#modificar_cir_Espejo_NMOS(WTA,L,Kpn_T2,Kpn_T3)
display(Image(url='images/cetroid_Kp_TB_TC_n.png'))
print("VDD vs -Iout. Rojo Espejo ideal, Azul Espejo Con variaciones en KP_n de la oblea. Transistores TB y TC")
# Option 2 layout (shared source), corner-referenced Kp_n.
paint_matrix = matrix(int(size_m),int(size_m))
opcion2()
corner_kp_n()
plt.matshow(Kp_n)
plt.show()
plt.matshow(paint_matrix)
plt.show()
Kpn_T1=prom_KP_n_for_transistor(4,common_source)
Kpn_T2=prom_KP_n_for_transistor(5,common_source)
#modificar_cir_Espejo_NMOS(WTA,L,Kpn_T1,Kpn_T2)
display(Image(url='images/corner_Kp_CS_n.png'))
print("VDD vs -Iout. Rojo Espejo ideal, Azul Espejo Con variaciones en KP_n de la oblea. Diseno de source compartido")
# Option 2 layout (shared source), centre-referenced Kp_n.
paint_matrix = matrix(int(size_m),int(size_m))
opcion2()
center_kp_n()
plt.matshow(Kp_n)
plt.show()
plt.matshow(paint_matrix)
plt.show()
Kpn_T1=prom_KP_n_for_transistor(4,common_source)
Kpn_T2=prom_KP_n_for_transistor(5,common_source)
#modificar_cir_Espejo_NMOS(WTA,L,Kpn_T1,Kpn_T2)
display(Image(url='images/center_Kp_CS_n.png'))
print("VDD vs -Iout. Rojo Espejo ideal, Azul Espejo Con variaciones en KP_n de la oblea. Diseno de source compartido")
# --- PMOS mirror simulations (same structure as the NMOS runs) ---
# Option 1 layout, corner-referenced Kp_p.
paint_matrix = matrix(int(size_m),int(size_m))
opcion1()
corner_kp_p()
plt.matshow(Kp_p)
plt.show()
plt.matshow(paint_matrix)
plt.show()
Kpp_T1=prom_KP_p_for_transistor(1,common_source)
Kpp_T2=prom_KP_p_for_transistor(2,common_source)
Kpp_T3=prom_KP_p_for_transistor(3,common_source)
Kpp_T4=prom_KP_p_for_transistor(4,common_source)
#modificar_cir_Espejo_PMOS(WTA,L,Kpp_T1,Kpp_T2)
display(Image(url='images/corner_TA_TB_p.png'))
print("VDD vs -Iout. Rojo Espejo ideal PMOS, Azul Espejo Con variaciones en KP_p de la oblea. Transistores TA y TB")
#modificar_cir_Espejo_PMOS(WTA,L,Kpp_T1,Kpp_T3)
display(Image(url='images/corner_TA_TC_p.png'))
print("VDD vs -Iout. Rojo Espejo ideal PMOS, Azul Espejo Con variaciones en KP_p de la oblea. Transistores TA y TC")
#modificar_cir_Espejo_PMOS(WTA,L,Kpp_T2,Kpp_T3)
display(Image(url='images/corner_TB_TC_p.png'))
print("VDD vs -Iout. Rojo Espejo ideal PMOS, Azul Espejo Con variaciones en KP_p de la oblea. Transistores TB y TC")
# Option 1 layout, centre-referenced Kp_p.
paint_matrix = matrix(int(size_m),int(size_m))
opcion1()
center_kp_p()
plt.matshow(Kp_p)
plt.show()
plt.matshow(paint_matrix)
plt.show()
Kpp_T1=prom_KP_p_for_transistor(1,common_source)
Kpp_T2=prom_KP_p_for_transistor(2,common_source)
Kpp_T3=prom_KP_p_for_transistor(3,common_source)
Kpp_T4=prom_KP_p_for_transistor(4,common_source)
#modificar_cir_Espejo_PMOS(WTA,L,Kpp_T1,Kpp_T2)
display(Image(url='images/center_Kp_LS_p.png'))
print("VDD vs -Iout. Rojo Espejo ideal, Azul Espejo Con variaciones en KP_p de la oblea. Transistores TA y TB")
#modificar_cir_Espejo_PMOS(WTA,L,Kpp_T2,Kpp_T3)
display(Image(url='images/center_Kp_LS_p_RB_RC.png'))
print("VDD vs -Iout. Rojo Espejo ideal, Azul Espejo Con variaciones en KP_p de la oblea. Transistores TB y TC")
# Option 2 layout (shared source), corner-referenced Kp_p.
paint_matrix = matrix(int(size_m),int(size_m))
opcion2()
corner_kp_p()
plt.matshow(Kp_p)
plt.show()
plt.matshow(paint_matrix)
plt.show()
Kpp_T1=prom_KP_p_for_transistor(4,common_source)
Kpp_T2=prom_KP_p_for_transistor(5,common_source)
#modificar_cir_Espejo_PMOS(WTA,L,Kpp_T1,Kpp_T2)
display(Image(url='images/center_Kp_CS_p.png'))
print("VDD vs -Iout. Rojo Espejo ideal, Azul Espejo Con variaciones en KP_p de la oblea. Diseno de source compartido")
# Option 2 layout (shared source), centre-referenced Kp_p.
paint_matrix = matrix(int(size_m),int(size_m))
opcion2()
center_kp_p()
plt.matshow(Kp_p)
plt.show()
plt.matshow(paint_matrix)
plt.show()
Kpp_T1=prom_KP_p_for_transistor(4,common_source)
Kpp_T2=prom_KP_p_for_transistor(5,common_source)
#modificar_cir_Espejo_PMOS(WTA,L,Kpp_T1,Kpp_T2)
display(Image(url='images/center_Kp_CS_p_btr.png'))
print("VDD vs -Iout. Rojo Espejo ideal, Azul Espejo Con variaciones en KP_p de la oblea. Diseno de source compartido")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Variacion por cuadro de Kp del 0.0004%
Step2: Matriz de Trasconductancia ideal KP_n_Ideal
Step3: Matriz de Trasconductancia ideal KP_p_Ideal
Step4: OPCION 1 Creacion de las matrices del material con variaciones en Kp_n y Kp_p a partir de la esquina
Step5: OPCION 2 Creacion de las matrices del material con variaciones en Kp_n y Kp_p del centro hacia afuera
Step6: Inicializando la matriz de dibujo en el material
Step7: Ingrese las dimensiones LxW de los transistores
Step8: OPCION 1 de Diseño de 4 transistores de optimizacion de espacio con centroide comun en la oblea, para usar 2 de esos en la simulacion del espejo de corriente
Step9: OPCION 2, Diseño de 2 transistores de centroide comun con source compartido (ver imagen). Diseno sugerido de diferentes articulos
Step10: Calculo del valor promedio de KP para cada transistor
Step11: Con variacion de variacion a partir de la esquina de Kp en la oblea
Step12: Con variacion del centro hacia afuera de Kp en la oblea
Step13: NGSPICE Simulations - 01 Current Mirror
Step14: Funcion para editar los archivo de simulacion del espejo NMOS 'espejoNmosPythonFile.cir' y el espejo P 'espejoPmosPythonFile.cir'
Step15: Espejo NMOS con variaciones de KP_n en la oblea
Step16: Simulacion con variacion desde el centro de Kp_n en la oblea y centroide comun entre 2 transistores (de los 4 disponibles) para formar el espejo de corriente.
Step17: Simulacion con variacion desde la esquina de Kp_n en la oblea y diseno 2 de surce compartido, centroide comun entre 2 transistores para formar el espejo de corriente.
Step18: Simulacion con variacion desde el centro de Kp_n en la oblea y diseno 2 de surce compartido, centroide comun entre 2 transistores para formar el espejo de corriente.
Step19: Espejo PMOS con variaciones de KP_p en la oblea
Step20: Simulacion con variacion desde el centro de Kp_p en la oblea y centroide comun entre 2 transistores (de los 4 disponibles) para formar el espejo de corriente.
Step21: Simulacion con variacion desde la esquina de Kp_p en la oblea y diseno 2 de source compartido, centroide comun entre 2 transistores para formar el espejo de corriente.
Step22: Simulacion con variacion desde el centro de Kp_p en la oblea y diseno 2 de source compartido, centroide comun entre 2 transistores para formar el espejo de corriente.
|
10,214
|
<ASSISTANT_TASK:>
Python Code:
%pylab notebook
%precision 2
# Three generators sharing a common bus via frequency droop control.
# Each machine is rated Pn at power factor PF.
Pn = 100e6 # [W]
PF = 0.8
# No-load frequency and speed droop (SD) of each generator's governor.
f_nl_A = 61.0 # [Hz]
SD_A = 3 # [%]
f_nl_B = 61.5 # [Hz]
SD_B = 3.4 # [%]
f_nl_C = 60.5 # [Hz]
SD_C = 2.6 # [%]
# Full-load frequency from the droop definition SD = (f_nl - f_fl) / f_fl,
# i.e. f_fl = f_nl / (1 + SD/100).
f_fl_A = f_nl_A / (SD_A / 100.0 +1)
f_fl_B = f_nl_B / (SD_B / 100.0 +1)
f_fl_C = f_nl_C / (SD_C / 100.0 +1)
print ('f_fl_A = {:.3f} Hz'.format(f_fl_A))
print ('f_fl_B = {:.3f} Hz'.format(f_fl_B))
print ('f_fl_C = {:.3f} Hz'.format(f_fl_C))
# Slope of each power-frequency characteristic: rated power divided by the
# frequency drop from no load to full load [W/Hz].
sp_A = Pn / (f_nl_A - f_fl_A)
sp_B = Pn / (f_nl_B - f_fl_B)
sp_C = Pn / (f_nl_C - f_fl_C)
print('''
sp_A = {:.2f} MW/Hz
sp_B = {:.2f} MW/Hz
sp_C = {:.2f} MW/Hz
'''.format(sp_A/1e6, sp_B/1e6, sp_C/1e6))
# System frequency for a given total load, from the power balance
# sum_i sp_i * (f_nl_i - f_sys) = Pload, solved for f_sys.
Pload = 230e6 # [W]
f_sys = (sp_A*f_nl_A + sp_B*f_nl_B + sp_C*f_nl_C - Pload) / (sp_A + sp_B + sp_C)
print('''
f_sys = {:.2f} Hz
================'''.format(f_sys))
# Power delivered by each generator at the common system frequency.
Pa = sp_A * (f_nl_A - f_sys)
Pb = sp_B * (f_nl_B - f_sys)
Pc = sp_C * (f_nl_C - f_sys)
print('''
Pa = {:.1f} MW
Pb = {:.1f} MW
Pc = {:.1f} MW
============'''.format(Pa/1e6, Pb/1e6, Pc/1e6))
# Sweep the total load from 0 to 300 MW and repeat the calculation
# (arange/interp/plot come from the %pylab star import above).
Pload_plot = arange(0,300.1,5) * 1e6 # [W]
f_sys = (sp_A*f_nl_A + sp_B*f_nl_B + sp_C*f_nl_C - Pload_plot) / (sp_A + sp_B + sp_C)
PA = sp_A * (f_nl_A - f_sys)
PB = sp_B * (f_nl_B - f_sys)
PC = sp_C * (f_nl_C - f_sys)
title('Power Sharing Versus Total Load')
xlabel('Total Load [MW]')
ylabel('Generator Power [MW]')
plot(Pload_plot/1e6, PA/1e6, 'g--', linewidth = 2)
plot(Pload_plot/1e6, PB/1e6, 'b', linewidth = 2 )
plot(Pload_plot/1e6, PC/1e6, 'm.', linewidth = 2)
# Rated-power ceiling and zero-power floor as reference lines.
plot([0, 300], [Pn/1e6, Pn/1e6], 'r', linewidth = 2)
plot([0, 300], [0, 0], 'r:', linewidth = 2)
legend(('Generator A','Generator B','Generator C','upper power limit', 'lower power limit'), loc=4, framealpha=1);
grid()
# Total load at which PB first reaches the rated power Pn
# (PB grows monotonically with load, so np.interp applies).
interp(Pn, PB, Pload_plot)/1e6 # using the interpolate function to determine
                               # the exact crossover of PB and Pn @Pload
# Total load at which PC crosses zero (below this, C would motor).
interp(0, PC, Pload_plot)/1e6 # using the interpolate function to determine
                              # the exact crossover of PC and 0 @Pload
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Description
Step2: (a)
Step3: and the slopes of the power-frequency curves are
Step4: The total load is 230 MW, so the system frequency can be optained form the load power as follows
Step5: The power supplied by each generator will be
Step6: (b)
Step7: Calculate the system frequency as function of $P_\text{load}$ using
Step8: Calculate the power of each generator
Step9: Plot the power sharing versus load
Step10: This plot reveals that there are power sharing problems both for high loads and for low loads. Generator B is the first to exceed its ratings as load increases. Its rated power is reached at a total load of
Step11: MW.
|
10,215
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import matplotlib.pyplot as plt
import shapefile as shp
from pprint import pprint
from matplotlib.path import Path as Polygon
def inpoly(x, y, pgcoords):
    """Return a boolean array telling which grid points lie inside a polygon.

    Parameters
    ----------
    x, y : array_like
        Either two 1-D coordinate vectors (a full meshgrid is generated
        from them) or two 2-D coordinate arrays of identical shape.
    pgcoords : sequence of (x, y) pairs or ndarray of shape [Np, 2]
        Vertices of the polygon.

    Returns
    -------
    ndarray of bool, shape [Ny, Nx]
        True where the corresponding grid point falls inside the polygon.

    Raises
    ------
    TypeError
        If ``pgcoords`` cannot be turned into a polygon.
    ValueError
        If ``x``/``y`` are not convertible to float arrays, or are 2-D
        with mismatching shapes.
    """
    try:
        # Polygon (matplotlib.path.Path) accepts [(x0, y0), (x1, y1), ...]
        # or an [Np, 2] ndarray; anything else raises here.
        pgon = Polygon(pgcoords)
    except Exception:
        print('pgcoords must be like [(0, 0), (1, 0), ..] or\n'
              + 'an np.ndarray of shape [Np, 2]')
        raise TypeError("Can't create polygon, pgcoords error")
    try:
        x = np.array(x, dtype=float)
        y = np.array(y, dtype=float)
    except Exception:
        raise ValueError("x and y must be convertible to float arrays.")
    if len(x.shape) == 1:
        # Two coordinate vectors: expand to full 2-D grids.
        # Bug fix: this previously read "Y, Y = np.meshgrid(x, y)",
        # leaving X undefined for 1-D input.
        X, Y = np.meshgrid(x, y)
    else:
        if x.shape != y.shape:
            # Original code only evaluated (and discarded) the comparison
            # and then raised via an undefined "err" module; raise properly.
            raise ValueError("x and y not np.ndarrays with same shape.")
        X = x
        Y = y
    # Test all grid points in one call, then restore the grid shape.
    xy = np.vstack((X.ravel(), Y.ravel())).T
    return pgon.contains_points(xy).reshape(X.shape)
# Show what is in the current working directory.
os.listdir('.')
# directory (this will be different on your computer)
shpdir = os.path.join('.', 'Sectie')
# Take the first file found in the shapefile directory.
fname = os.listdir(shpdir)[0]
print("The shapefile to work with: ",fname)
# Open a pyshp reader on the shapefile.
rdr = shp.Reader(os.path.join(shpdir, fname))
print("\nAttributes and methods accessible throuhgh this reader:\n")
# learn to read and create comprehensions, this is one
pprint([p for p in dir(rdr) if not p.startswith('_')])
# Field definitions: each entry is (name, type, length, decimals).
rdr.fields
fldNames = [p[0] for p in rdr.fields]
print(fldNames)
print(fldNames)
# One attribute record per shape in the file.
for r in rdr.iterRecords():
    print(r)
# shapeRecords() yields objects combining geometry and attributes.
shprecs = rdr.shapeRecords()
shprecs
# Print the bounding box and record of every shape.
for i, sr in enumerate(shprecs):
    print("Shape number ", i+1)
    print("Bounding box = ",sr.shape.bbox)
    print("Record = ",sr.record)
    print()
# Public attributes/methods of a shape object (sr is the last loop value).
[att for att in dir(sr.shape) if not att.startswith('_')]
# Overall bounding box [xmin, ymin, xmax, ymax] of all shapes.
rdr.bbox
# Index sequences that walk the bbox corners so it plots as a closed rectangle.
ix = [0, 2, 2, 0, 0] # x-indices in rdr.box
iy = [1, 1, 3, 3, 1] # y-indices
plt.plot(np.array([rdr.bbox[i] for i in ix]),
         np.array([rdr.bbox[i] for i in iy]), 'r', lw=3)
plt.show()
# Vertex coordinates of the first shape as an [Np, 2] array.
np.array(shprecs[0].shape.points)
# Plot the overall bounding box plus the outline of every shape.
plt.plot(np.array([rdr.bbox[i] for i in ix]),
         np.array([rdr.bbox[i] for i in iy]), 'r', lw=3)
for sr in shprecs:
    pnts = np.array(sr.shape.points)
    plt.plot(pnts[:,0], pnts[:,1])
plt.show()
# grid line coordinates
x = np.linspace(rdr.bbox[0], rdr.bbox[2], 81)
y = np.linspace(rdr.bbox[1], rdr.bbox[3], 41)
# cell center coordinates
xm = 0.5 * (x[:-1] + x[1:])
ym = 0.5 * (y[:-1] + y[1:])
# generate a full 2D array for both the x and y coordinates
XM, YM = np.meshgrid(xm, ym)
fig, ax = plt.subplots()
# Column index of the horizontal conductivity field in each record.
ikh = fldNames.index('KH')
KH = np.zeros_like(XM) # 2D array to fill with conductivities from shapes
# iterate ove the list of shaperecords. In each loop the next index,
# the next color for the plot and the next records is given.
for i, clr, sr in zip(range(len(shprecs)), "brgmcy", shprecs):
    pnts = sr.shape.points # the shape coordinates
    inarray = inpoly(XM, YM, pnts) # boolean array to match the shape
    KH[inarray] = sr.record[ikh] # fill in the value
    ax.plot(XM[inarray], YM[inarray], clr+'o', label="shape {}".format(i))
ax.legend(loc='best', fontsize='x-small')
plt.show()
# plt.spy shows where an array is non-zero
plt.spy(KH)
plt.show()
# Print KH in chunks of 8 columns so rows fit on screen.
indices = range(0, KH.shape[1], 8)
for i, kh in zip(indices, np.split(KH, indices[1:], axis=1)):
    print("Columns {}:{}".format(i,i+kh.shape[1]))
    for L in kh:
        # One formatted row: fixed-width general format per value.
        print(("{:8.4g}" * len(L)).format(*L))
    print()
print("The kh values of the different shapes: ", [sr.record[ikh] for sr in shprecs])
# Color-coded image of the array (values are close, so colors barely differ).
plt.matshow(KH)
plt.show()
# Filled contour plot of KH over the cell-center grid, 50 levels.
plt.contourf(XM, YM, KH, 50)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We need a function that will tell us whether a coordinate pair is inside a shape or not. This function exists in matplotlib.path and is called Path. Path creates a polygon which can then check if points are inside or outside it. Because I like the word Polygon better than Path for this goal, I import the Path class and call it Polygon here.
Step3: I define a function inpoly that checks whether the coordinates x,y are inside the shape given by pgcoords. The function returns a logical array (boolean area) of the same shape as the inputs x and y such that points inside the polygon are True in the array and False otherwise.
Step4: CD to the exercises/Mar21 dictory, where we have a shape file stored in the subdirectory Sectie (Section)
Step5: Define the director where the shape file is and get the file name
Step6: Open a reader to read the shapefile
Step7: So we can read the bounding box of all the shapes in the file, its table, its elevaton (if specified), its fiels names etc.
Step8: Each field not only gives the name of the data but also whether it string )'C' or a number 'N' and its length (first number). The second number is the number of didgets, when the number is floating point.
Step9: you may use
Step10: Read the shapeRecords from the shapefile into a list
Step11: Each of these ShapeRecords contains both the shape and its record (data).
Step12: What attributes/methods has a shape?
Step13: Let's now get serious and define a model grid containing all shapes. Simply use the overall bounding box to get the grid extension.
Step14: Get the indices from the bounding box so that we can plot is in a single line
Step15: With these indices in sequence we can get the x and y coordinates of the bbox so arranged that the bbox is plotted (in red with linewidth 3, so that it shows up at the boundary of the plot.
Step16: Now see how we can access the point os a shape, here the first shape in our shape-records list, so shprecs[0].shape.
Step17: With all this in place we can plot the bounding box of all shapes and then each shape itself using its points as an array.
Step18: The next step is to fill the grid points that fall within each shape with the data value that belongs to the shape. We'll use the horizontal conductivity KH that is contained in each shape's record as can be seen from the field names above.
Step19: Then fill in the data value for each grid and plot the result, once color per shape.
Step20: Et voilà !
Step21: Show the KH array the Matlab way, which is much easier to see. In spyder you may inspect it in the variable window as a kind of spreadsheet.
Step22: Another way to show an array is to plot it using plt.matshow. Each cell will be colored according to its value. Clearly if the values differ too little, the show up as the same value. This is actually the case here.
Step23: You could also contour the shapes, but that has the same problems as with matshow
|
10,216
|
<ASSISTANT_TASK:>
Python Code:
# Project Euler problem 4: find the largest palindrome that is the
# product of two 3-digit numbers.
a = range(100, 1000)
b = range(100, 1000)
lst = []  # collects every palindromic product found
for x in a:
    # Start the inner loop at x: x*y == y*x, so scanning both orderings
    # (as range(100, 1000) did) only duplicated work without changing max.
    for y in range(x, 1000):
        p = x * y
        # Convert once; a palindrome reads the same forwards and backwards.
        s = str(p)
        if s == s[::-1]:
            lst.append(p)
print(max(lst))
# This cell will be used for grading, leave it at the end of the notebook.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then I created an empty list, which would hold all the palindromes created from multiplying those three digit numbers.
Step2: Using two for loops, I looped through all the values in a, multiplying them by all the possible values of b, and calling the product p. Using an if statement, I turned the product into a string and compared whether it was the same fowards as backwards. If it was, I appended it to my list of palindromes.
Step3: Finally, I printed the maximum palindrome using the built in max function.
|
10,217
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import OpenFermion, installing it first if it is missing
# (the "!pip" line is an IPython shell escape, notebook-only syntax).
try:
    import openfermion
except ImportError:
    !pip install git+https://github.com/quantumlib/OpenFermion.git@master#egg=openfermion
    import openfermion
# Set parameters of jellium model.
wigner_seitz_radius = 5. # Radius per electron in Bohr radii.
n_dimensions = 2 # Number of spatial dimensions.
grid_length = 2 # Number of grid points in each dimension.
spinless = True # Whether to include spin degree of freedom or not.
n_electrons = 2 # Number of electrons.
# Figure out length scale based on Wigner-Seitz radius and construct a basis grid.
length_scale = openfermion.wigner_seitz_length_scale(
    wigner_seitz_radius, n_electrons, n_dimensions)
grid = openfermion.Grid(n_dimensions, grid_length, length_scale)
# Initialize the model and print out.
# plane_wave=False selects the plane-wave-dual basis, in which the
# two-body interaction is diagonal.
fermion_hamiltonian = openfermion.jellium_model(grid, spinless=spinless, plane_wave=False)
print(fermion_hamiltonian)
# Convert to DiagonalCoulombHamiltonian type.
hamiltonian = openfermion.get_diagonal_coulomb_hamiltonian(fermion_hamiltonian)
import cirq
import openfermion
# Obtain the Bogoliubov transformation matrix that diagonalizes the
# quadratic (one-body) part of the Hamiltonian.
quadratic_hamiltonian = openfermion.QuadraticHamiltonian(hamiltonian.one_body)
_, transformation_matrix, _ = quadratic_hamiltonian.diagonalizing_bogoliubov_transform()
# Create a circuit that prepares the mean-field state by filling the
# lowest orbitals and rotating back to the computational basis.
occupied_orbitals = range(n_electrons)
n_qubits = openfermion.count_qubits(quadratic_hamiltonian)
qubits = cirq.LineQubit.range(n_qubits)
state_preparation_circuit = cirq.Circuit(
    openfermion.bogoliubov_transform(
        qubits, transformation_matrix, initial_state=occupied_orbitals))
# Print circuit (DropNegligible removes near-identity gates first).
cirq.DropNegligible().optimize_circuit(state_preparation_circuit)
print(state_preparation_circuit)
from openfermion.circuits import trotter
# Set algorithm parameters.
time = 1.0      # total evolution time
n_steps = 1     # number of Trotter steps
order = 1       # first-order (symmetric) Trotter formula
# Construct circuit for one Trotter step using the fermionic swap network,
# which needs only linear qubit connectivity.
swap_network_trotter_step = cirq.Circuit(
    openfermion.simulate_trotter(
        qubits, hamiltonian, time, n_steps, order,
        algorithm=trotter.LINEAR_SWAP_NETWORK),
    strategy=cirq.InsertStrategy.EARLIEST)
# Print circuit.
cirq.DropNegligible().optimize_circuit(swap_network_trotter_step)
print(swap_network_trotter_step.to_text_diagram(transpose=True))
# Same Trotter step, but compiled with the split-operator method instead.
split_operator_trotter_step = cirq.Circuit(
    openfermion.simulate_trotter(
        qubits, hamiltonian, time, n_steps, order,
        algorithm=trotter.SPLIT_OPERATOR),
    strategy=cirq.InsertStrategy.EARLIEST)
cirq.DropNegligible().optimize_circuit(split_operator_trotter_step)
print(split_operator_trotter_step.to_text_diagram(transpose=True))
# Initialize Cirq simulator.
simulator = cirq.Simulator()
# Convert the Hamiltonian to a sparse matrix.
hamiltonian_sparse = openfermion.get_sparse_operator(hamiltonian)
# Obtain initial state vector as integer.
# Each occupied orbital i sets bit (n_qubits - 1 - i) of the basis index.
initial_state = sum(2 ** (n_qubits - 1 - i) for i in occupied_orbitals)
# Construct and simulate circuit using the swap network method.
circuit = state_preparation_circuit + swap_network_trotter_step
result = simulator.simulate(circuit, initial_state=initial_state)
final_state = result.final_state_vector
print('Energy of state obtained with swap network method: {}'.format(
    openfermion.expectation(hamiltonian_sparse, final_state).real))
# Construct and simulate circuit using the split-operator method.
circuit = state_preparation_circuit + split_operator_trotter_step
result = simulator.simulate(circuit, initial_state=initial_state)
final_state = result.final_state_vector
print('Energy of state obtained with split-operator method: {}'.format(
    openfermion.expectation(hamiltonian_sparse, final_state).real))
# Set algorithm parameters.
# order=0 selects the zeroth-order (first-order asymmetric) Trotter step.
time = 1.0
n_steps = 1
order = 0
# Construct circuit
swap_network_trotter_step = cirq.Circuit(
    openfermion.simulate_trotter(
        qubits, hamiltonian, time, n_steps, order,
        algorithm=trotter.LINEAR_SWAP_NETWORK),
    strategy=cirq.InsertStrategy.EARLIEST)
cirq.DropNegligible().optimize_circuit(swap_network_trotter_step)
print(swap_network_trotter_step.to_text_diagram(transpose=True))
# Same step, but omit the final swaps that would restore qubit order
# (the zeroth-order LINEAR_SWAP_NETWORK step reverses qubit order).
swap_network_trotter_step = cirq.Circuit(
    openfermion.simulate_trotter(
        qubits, hamiltonian, time, n_steps, order,
        algorithm=trotter.LINEAR_SWAP_NETWORK,
        omit_final_swaps=True),
    strategy=cirq.InsertStrategy.EARLIEST)
cirq.DropNegligible().optimize_circuit(swap_network_trotter_step)
print(swap_network_trotter_step.to_text_diagram(transpose=True))
# Third-order symmetric Trotter formula.
order=3
n_steps=1
swap_network_trotter_step = cirq.Circuit(
    openfermion.simulate_trotter(
        qubits, hamiltonian, time, n_steps, order,
        algorithm=trotter.LINEAR_SWAP_NETWORK),
    strategy=cirq.InsertStrategy.EARLIEST)
cirq.DropNegligible().optimize_circuit(swap_network_trotter_step)
print(swap_network_trotter_step.to_text_diagram(transpose=True))
# Define a phase estimation circuit.
def measure_bit_of_phase(system_qubits,
                         control_qubit,
                         controlled_unitary):
    """Yield the Hadamard-test ops that measure one bit of the phase.

    NOTE(review): system_qubits is currently unused here; the system
    qubits are already baked into controlled_unitary.
    """
    yield cirq.H(control_qubit)
    yield controlled_unitary
    yield cirq.H(control_qubit)
    yield cirq.measure(control_qubit)
# Get an upper bound on the Hamiltonian norm.
import numpy
bound = numpy.sum(numpy.abs(hamiltonian.one_body)) + numpy.sum(numpy.abs(hamiltonian.two_body))
# Construct phase estimation circuit.
# Scale the evolution time so the phase stays within one period.
time = 2 * numpy.pi / bound
control = cirq.LineQubit(-1)
# Controlled time evolution: passing control_qubit makes every gate
# in the Trotter step controlled on that qubit.
controlled_unitary = openfermion.simulate_trotter(
    qubits, hamiltonian, time,
    n_steps=1,
    order=1,
    algorithm=trotter.LINEAR_SWAP_NETWORK,
    control_qubit=control)
circuit = cirq.Circuit(
    measure_bit_of_phase(
        qubits,
        control,
        controlled_unitary))
# Print the circuit.
cirq.DropNegligible().optimize_circuit(circuit)
print(circuit.to_text_diagram(transpose=True))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Circuits 2
Step2: Electronic structure Hamiltonians with diagonal Coulomb operators
Step3: In the last line above we converted the FermionOperator to a class called DiagonalCoulombHamiltonian which is a special data structure in OpenFermion for representing operators that take the form
Step4: Hamiltonian simulation via a Trotter-Suzuki product formula
Step5: Now let's do the same, but using the SPLIT_OPERATOR method.
Step6: Let's run these circuits on the simulator that comes with Cirq and compute the energy of the resulting states.
Step7: Increasing the number of Trotter steps will cause both methods to converge to the same operation, corresponding to an exact simulation. You can play around with the number of Trotter steps to confirm. Note that for NISQ applications one will often be interested in using the zeroth-order Trotter step, also known as the first-order asymmetric Trotter step. We can implement these Trotter steps by setting the order to zero, as we do below.
Step8: Note the unusual pattern of fermionic swap networks towards the end. What is happening there is that in the zeroth order step of a LINEAR_SWAP_NETWORK style Trotter step, the qubit order is reversed upon output. To avoid this one needs to set an option called omit_final_swaps, e.g.
Step9: One can also have fun compiling arbitrary high-order formulas. Here's the third-order symmetric formula
Step10: Application to phase estimation
|
10,218
|
<ASSISTANT_TASK:>
Python Code:
# Embed lecture videos in the notebook output.
from IPython.display import YouTubeVideo
YouTubeVideo('sXx-PpEBR7k')
from IPython.display import YouTubeVideo
YouTubeVideo('_Xcmh1LQB9I')
from IPython.display import YouTubeVideo
# Only play the clip from 195 s to 234 s of this video.
YouTubeVideo('jmMcJ4XlrWM', start=195, end=234)
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn import datasets
# Load the 8x8 handwritten-digits dataset and pair each image with its label.
digits = datasets.load_digits()
images_and_labels = list(zip(digits.images, digits.target))
# Show the first 10 digit images in a row (grayscale, axes hidden).
# NOTE(review): label is unused in this loop.
for index, (image, label) in enumerate(images_and_labels[:10]):
    plt.subplot(2, 10, index + 1)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r)
plt.show()
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn import datasets
digits = datasets.load_digits()
images_and_labels = list(zip(digits.images, digits.target))
# Show a single image together with its flattened 64-value feature vector.
for index, (image, label) in enumerate(images_and_labels[:1]):
    plt.subplot(2, 1, index + 1)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r)
    plt.title('Feature: ' + str(image.flatten()))
plt.show()
from sklearn import datasets
from sklearn.utils import shuffle
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# Shuffle with a fixed seed so the split is reproducible.
X_digits, y_digits = shuffle(X_digits, y_digits, random_state=0)
n_samples = len(X_digits)
# 80/20 train/test split.
X_train = X_digits[:int(.8 * n_samples)]
y_train = y_digits[:int(.8 * n_samples)]
X_test = X_digits[int(.8 * n_samples):]
y_test = y_digits[int(.8 * n_samples):]
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.utils import shuffle
from systemml.mllearn import LogisticRegression
from pyspark.sql import SQLContext
# NOTE(review): assumes a SparkContext `sc` is predefined by the
# notebook runtime (e.g. a pyspark kernel) — verify before running.
sqlCtx = SQLContext(sc)
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
X_digits, y_digits = shuffle(X_digits, y_digits, random_state=0)
n_samples = len(X_digits)
# Fixed held-out test set: the last 20% of the shuffled data.
X_test = X_digits[int(.8 * n_samples):]
y_test = y_digits[int(.8 * n_samples):]
# Train with increasing fractions of the data to trace a learning curve.
training_fraction = [0.005, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
scores = []
for frac in training_fraction:
    X_train = X_digits[:int(frac * n_samples)]
    y_train = y_digits[:int(frac * n_samples)]
    classifier = LogisticRegression(sqlCtx)
    score = classifier.fit(X_train, y_train).score(X_test, y_test)
    scores = scores + [ score ]
# Plot prediction score versus fraction of data used for training.
plt.plot(training_fraction, scores)
plt.xlabel('Fraction of data used for training: E')
plt.ylabel('Prediction score (higher the better): P')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The first generation of AI researchers were John McCarthy
Step2: Neural networks returns
Step3: The success of VC theory for number of problems like handwriting recognition, using support vector machines (SVM),
Step4: Step 2
Step5: This is an extremely crucial step and if done incorrectly can screw up the whole learning process (atleast for most if not all
Step 3
Step6: General Machine Learning Setup
|
10,219
|
<ASSISTANT_TASK:>
Python Code:
# Directory into which the Facets repository will be cloned/installed.
FACETS_INSTALL_DIR = './'
%%bash -s "$FACETS_INSTALL_DIR"
# $1 is the install dir passed in via "-s" above; clone and register the
# Facets notebook extension only if it is not already present.
if [ ! -d "${1}/facets" ]; then
  # Install facets - only need to do this once per Datalab instance.
  cd $1
  git clone https://github.com/PAIR-code/facets
  cd facets
  jupyter nbextension install facets-dist/
else
  echo Facets is already installed under $1.
fi
# Add the facets overview python code to the python path and import dependencies.
import os
import sys
sys.path.append(os.path.join(FACETS_INSTALL_DIR, 'facets/facets_overview/python'))
# NOTE(review): reload() as a builtin and sys.setdefaultencoding() exist
# only in Python 2; this targets the Python 2 Datalab runtime. Remove the
# next two lines when running under Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')
import pandas as pd
import google.datalab.bigquery as bq
from generic_feature_statistics_generator import GenericFeatureStatisticsGenerator
from IPython.core.display import display, HTML
import base64
# Query the 1000 Genomes sample metadata from BigQuery.
# Fix: the triple-quote delimiters of the two string literals below had
# been stripped, leaving invalid Python; they are restored here.
sql = """
--
-- The 1000 Genomes metadata includes gender, familial relationships, population,
-- super population, sequencing metrics, etc.
--
SELECT
  *
FROM
  `genomics-public-data.1000_genomes.sample_info`
"""
query = bq.Query(sql)
df = query.execute().result().to_dataframe()
# Build the Facets Overview statistics proto from the dataframe and
# base64-encode it for embedding in HTML.
proto = GenericFeatureStatisticsGenerator().ProtoFromDataFrames([{'name': 'test', 'table': df}])
protostr = base64.b64encode(proto.SerializeToString()).decode("utf-8")
# HTML template for the facets-overview web component; the {placeholders}
# are filled in via str.format below.
HTML_TEMPLATE = """<link rel="import" href="{facetsPath}" >
<h4>Facets Overview of dataframe with shape {shape}</h4>
<facets-overview id="overviewelem"></facets-overview>
<script>
  document.querySelector("#overviewelem").height = "1000px";
  document.querySelector("#overviewelem").protoInput = "{protostr}";
</script>"""
html = HTML_TEMPLATE.format(facetsPath=os.path.join(FACETS_INSTALL_DIR, 'facets/facets-dist/facets-jupyter.html'),
                            shape=str(df.shape),
                            protostr=protostr)
# Render the visualization inline in the notebook.
display(HTML(html))
# Serialize the dataframe rows as JSON records for the Facets Dive widget.
jsonstr = df.to_json(orient='records')
# Fix: the triple-quote delimiters of the template literal below had been
# stripped, leaving invalid Python; they are restored here.
HTML_TEMPLATE = """<link rel="import" href="{facetsPath}" >
<h4>Facets Dive of dataframe with shape {shape}</h4>
<facets-dive id="diveelem"></facets-dive>
<script>
  var data = {jsonstr};
  document.querySelector("#diveelem").height = "1000px";
  document.querySelector("#diveelem").data = data;
  // Specify a few default settings.
  document.querySelector("#diveelem").positionMode = 'scatter';
  // Specify a few default settings specific to 1000 Genomes.
  document.querySelector("#diveelem").horizontalFacet = 'Super_Population';
  document.querySelector("#diveelem").verticalFacet = 'Main_Project_E_Centers';
  document.querySelector("#diveelem").horizontalPosition = 'Total_Exome_Sequence';
  document.querySelector("#diveelem").verticalPosition = 'Total_LC_Sequence';
  document.querySelector("#diveelem").colorBy = 'In_Phase1_Integrated_Variant_Set';
</script>"""
html = HTML_TEMPLATE.format(facetsPath=os.path.join(FACETS_INSTALL_DIR, 'facets/facets-dist/facets-jupyter.html'),
                            shape=str(df.shape),
                            jsonstr=jsonstr)
# Render the visualization inline in the notebook.
display(HTML(html))
# Additional example queries (not executed here).
# Fix: the triple-quote delimiters of both SQL string literals had been
# stripped, leaving invalid Python; they are restored here.
sql = """
--
-- Examine metadata about individuals in the Personal Genomes Project.
--
SELECT * FROM `google.com:biggene.pgp.phenotypes`
"""
sql = """
--
-- Examine metadata about individuals in the Simons Genome Diversity Project.
--
SELECT *
FROM `genomics-public-data.simons_genome_diversity_project.sample_metadata`
"""
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Retrieve the data
Step3: Execute the query to fill a Pandas dataframe with the data of interest.
Step5: Visualize the result with Facets
Step7: Facets Dive
Step10: Additional Queries
|
10,220
|
<ASSISTANT_TASK:>
Python Code:
import enoslib as en
# Enable rich logging
_ = en.init_logging()
# claim the resources
# One production network on the Grid'5000 rennes site.
network = en.G5kNetworkConf(type="prod", roles=["my_network"], site="rennes")
# Two parasilo nodes: one tagged control+xp, one tagged agent+xp.
conf = (
    en.G5kConf.from_settings(job_type="allow_classic_ssh", job_name="enoslib_observability")
    .add_network_conf(network)
    .add_machine(
        roles=["control", "xp"], cluster="parasilo", nodes=1, primary_network=network
    )
    .add_machine(
        roles=["agent", "xp"], cluster="parasilo", nodes=1, primary_network=network
    )
    .finalize()
)
conf
# Reserve and set up the machines; returns role->hosts and role->networks maps.
provider = en.G5k(conf)
roles, networks = provider.init()
roles
# Install the stress load generator on the agent nodes.
with en.actions(roles=roles["agent"]) as a:
    a.apt(name="stress", state="present")
# Start a capture on all nodes
# - stress on some nodes
import time
# Dstat records system metrics on all xp nodes for the duration of the block:
# 5 s idle, 10 s of 4-core CPU stress on the agents, 5 s idle again.
with en.Dstat(nodes=roles["xp"]) as d:
    time.sleep(5)
    en.run_command("stress --cpu 4 --timeout 10", roles=roles["agent"])
    time.sleep(5)
backup_dir = d.backup_dir
import pandas as pd
import seaborn as sns
print(backup_dir)
# Create a dictionary of (alias) -> list of pandas df
# Concatenate every per-host dstat CSV into one dataframe, tagging each
# row with its host and source file (first 5 lines of a dstat CSV are header).
result = pd.DataFrame()
for host in roles["xp"]:
    host_dir = backup_dir / host.alias
    csvs = host_dir.rglob("*.csv")
    for csv in csvs:
        print(csv)
        df = pd.read_csv(csv, skiprows=5, index_col=False)
        df["host"] = host.alias
        df["csv"] = csv
        result = pd.concat([result, df], axis=0, ignore_index=True)
result
# let's show the metrics !
# User CPU over time, one line per host.
sns.lineplot(data=result, x="epoch", y="usr", hue="host", markers=True, style="host")
# - on all the interface configured on the my_network network
# - we dump icmp traffic only
# - for the duration of the commands (here a client is pigging the server)
with en.TCPDump(
hosts=roles["xp"], ifnames=["any"], options="icmp"
) as t:
backup_dir = t.backup_dir
_ = en.run(f"ping -c10 {roles['control'][0].address}", roles["agent"])
from scapy.all import rdpcap
import tarfile
# Examples:
# create a dictionnary of (alias, if) -> list of decoded packets by scapy
decoded_pcaps = dict()
for host in roles["control"]:
host_dir = backup_dir / host.alias
t = tarfile.open(host_dir / "tcpdump.tar.gz")
t.extractall(host_dir / "extracted")
# get all extracted pcap for this host
pcaps = (host_dir / "extracted").rglob("*.pcap")
for pcap in pcaps:
decoded_pcaps.setdefault((host.alias, pcap.with_suffix("").name),
rdpcap(str(pcap)))
# Displaying some packets
for (host, ifs), packets in decoded_pcaps.items():
print(host, ifs)
packets[0].show()
packets[1].show()
# Refresh host metadata (e.g. interface names) so captures can be
# attached to a specific network instead of explicit interface names.
roles = en.sync_info(roles, networks)
# start a capture
# - on all the interface configured on the my_network network
# - we dump icmp traffic only
# - for the duration of the commands (here a client is pinging the server)
with en.TCPDump(
    hosts=roles["xp"], networks=networks["my_network"], options="icmp"
) as t:
    backup_dir = t.backup_dir
    _ = en.run(f"ping -c10 {roles['control'][0].address}", roles["agent"])
# Deploy a Telegraf/InfluxDB/Grafana monitoring stack:
# collector + UI on the control node, telegraf agents on the agent nodes.
monitoring = en.TIGMonitoring(collector=roles["control"][0], agent=roles["agent"], ui=roles["control"][0])
monitoring
monitoring.deploy()
# Generate some load so the dashboards have something to show.
en.run_command("stress --cpu 4 --timeout 60", roles=roles["agent"], background=True)
# create a tunnel to the service running inside g5k
tunnel = en.G5kTunnel(address=monitoring.ui.address, port=3000)
local_address, local_port, _ = tunnel.start()
print(f"The service is running at http://localhost:{local_port} (admin:admin)")
# don't forget to close it
tunnel.close()
import time
# Same tunnel, but as a context manager so it is closed automatically.
with en.G5kTunnel(address=monitoring.ui.address, port=3000) as (_, local_port, _):
    print(f"The service is running at http://localhost:{local_port}")
    time.sleep(60)
# Release the Grid'5000 reservation.
provider.destroy()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A simple load generator
Step2: Monitoring with dstat
Step3: Visualization
Step4: Packet sniffing with tcpdump
Step5: Visualization
Step6: Capture on a specific network
Step7: Monitoring with Telegraf/[InfluxDB|prometheus]/grafana
Step8: To not forget to close the tunnel you can use a context manager
Step9: Cleaning
|
10,221
|
<ASSISTANT_TASK:>
Python Code:
# ES-DOC CMIP6 model-documentation notebook boilerplate: open an output
# document for the 'atmoschem' realm and fill in its properties via
# DOC.set_id(...) / DOC.set_value(...) pairs.
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'uhh', 'sandbox-3', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
10,222
|
<ASSISTANT_TASK:>
Python Code:
# import modules
import pandas as pd
# Create dataframe
raw_data = {'regiment': ['Nighthawks', 'Nighthawks', 'Nighthawks', 'Nighthawks', 'Dragoons', 'Dragoons', 'Dragoons', 'Dragoons', 'Scouts', 'Scouts', 'Scouts', 'Scouts'],
'company': ['1st', '1st', '2nd', '2nd', '1st', '1st', '2nd', '2nd','1st', '1st', '2nd', '2nd'],
'name': ['Miller', 'Jacobson', 'Ali', 'Milner', 'Cooze', 'Jacon', 'Ryaner', 'Sone', 'Sloan', 'Piger', 'Riani', 'Ali'],
'preTestScore': [4, 24, 31, 2, 3, 4, 24, 31, 2, 3, 2, 3],
'postTestScore': [25, 94, 57, 62, 70, 25, 94, 57, 62, 70, 62, 70]}
df = pd.DataFrame(raw_data, columns = ['regiment', 'company', 'name', 'preTestScore', 'postTestScore'])
df
# Create a groupby variable that groups preTestScores by regiment
groupby_regiment = df['preTestScore'].groupby(df['regiment'])
groupby_regiment
list(df['preTestScore'].groupby(df['regiment']))
df['preTestScore'].groupby(df['regiment']).describe()
groupby_regiment.mean()
df['preTestScore'].groupby([df['regiment'], df['company']]).mean()
df['preTestScore'].groupby([df['regiment'], df['company']]).mean().unstack()
df.groupby(['regiment', 'company']).mean()
df.groupby(['regiment', 'company']).size()
# Group the dataframe by regiment, and for each regiment,
for name, group in df.groupby('regiment'):
# print the name of the regiment
print(name)
# print the data of that regiment
print(group)
list(df.groupby(df.dtypes, axis=1))
df.groupby('regiment').mean().add_prefix('mean_')
def get_stats(group):
    """Summarize a group as a dict with its min, max, count, and mean."""
    summary = {}
    summary['min'] = group.min()
    summary['max'] = group.max()
    summary['count'] = group.count()
    summary['mean'] = group.mean()
    return summary
# Bin edges for pd.cut: intervals (0, 25], (25, 50], (50, 75], (75, 100]
bins = [0, 25, 50, 75, 100]
# One label per bin, in ascending score order
group_names = ['Low', 'Okay', 'Good', 'Great']
# Bucket each postTestScore into its labelled bin (new 'categories' column)
df['categories'] = pd.cut(df['postTestScore'], bins, labels=group_names)
# Per-bin summary stats via get_stats; unstack turns the stat dict into columns
df['postTestScore'].groupby(df['categories']).apply(get_stats).unstack()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: "This grouped variable is now a GroupBy object. It has not actually computed anything yet except for some intermediate data about the group key df['key1']. The idea is that this object has all of the information needed to then apply some operation to each of the groups." - Python for Data Analysis
Step2: Descriptive statistics by group
Step3: Mean of each regiment's preTestScore
Step4: Mean preTestScores grouped by regiment and company
Step5: Mean preTestScores grouped by regiment and company without heirarchical indexing
Step6: Group the entire dataframe by regiment and company
Step7: Number of observations in each regiment and company
Step8: Iterate an operations over groups
Step9: Group by columns
Step10: In the dataframe "df", group by "regiments, take the mean values of the other variables for those groups, then display them with the prefix_mean
Step11: Create a function to get the stats of a group
Step12: Create bins and bin up postTestScore by those pins
Step13: Apply the get_stats() function to each postTestScore bin
|
10,223
|
<ASSISTANT_TASK:>
Python Code:
# Import of the pyomo module
from pyomo.environ import *
# Creation of a Concrete Model
model = ConcreteModel()
## Define sets ##
# Sets
# i canning plants / seattle, san-diego /
# j markets / new-york, chicago, topeka / ;
model.i = Set(initialize=['seattle','san-diego'], doc='Canning plans')
model.j = Set(initialize=['new-york','chicago', 'topeka'], doc='Markets')
## Define parameters ##
# Parameters
# a(i) capacity of plant i in cases
# / seattle 350
# san-diego 600 /
# b(j) demand at market j in cases
# / new-york 325
# chicago 300
# topeka 275 / ;
model.a = Param(model.i, initialize={'seattle':350,'san-diego':600}, doc='Capacity of plant i in cases')
model.b = Param(model.j, initialize={'new-york':325,'chicago':300,'topeka':275}, doc='Demand at market j in cases')
# Table d(i,j) distance in thousands of miles
# new-york chicago topeka
# seattle 2.5 1.7 1.8
# san-diego 2.5 1.8 1.4 ;
dtab = {
('seattle', 'new-york') : 2.5,
('seattle', 'chicago') : 1.7,
('seattle', 'topeka') : 1.8,
('san-diego','new-york') : 2.5,
('san-diego','chicago') : 1.8,
('san-diego','topeka') : 1.4,
}
model.d = Param(model.i, model.j, initialize=dtab, doc='Distance in thousands of miles')
# Scalar f freight in dollars per case per thousand miles /90/ ;
model.f = Param(initialize=90, doc='Freight in dollars per case per thousand miles')
# Parameter c(i,j) transport cost in thousands of dollars per case ;
# c(i,j) = f * d(i,j) / 1000 ;
def c_init(model, i, j):
  """Transport cost (thousands of dollars per case) from plant i to market j."""
  freight = model.f
  distance = model.d[i, j]
  return freight * distance / 1000
model.c = Param(model.i, model.j, initialize=c_init, doc='Transport cost in thousands of dollar per case')
## Define variables ##
# Variables
# x(i,j) shipment quantities in cases
# z total transportation costs in thousands of dollars ;
# Positive Variable x ;
model.x = Var(model.i, model.j, bounds=(0.0,None), doc='Shipment quantities in case')
## Define contrains ##
# supply(i) observe supply limit at plant i
# supply(i) .. sum (j, x(i,j)) =l= a(i)
def supply_rule(model, i):
  """Constraint rule: total shipments out of plant i may not exceed its capacity."""
  shipped = sum(model.x[i, market] for market in model.j)
  return shipped <= model.a[i]
model.supply = Constraint(model.i, rule=supply_rule, doc='Observe supply limit at plant i')
# demand(j) satisfy demand at market j ;
# demand(j) .. sum(i, x(i,j)) =g= b(j);
def demand_rule(model, j):
  """Constraint rule: total shipments into market j must cover its demand."""
  delivered = 0
  for plant in model.i:
    delivered += model.x[plant, j]
  return delivered >= model.b[j]
model.demand = Constraint(model.j, rule=demand_rule, doc='Satisfy demand at market j')
def supply_rule(model, i):
  """Explicit-loop equivalent of the supply constraint: shipments out of
  plant i must stay within capacity a(i)."""
  total = 0.0
  for market in model.j:
    total = total + model.x[i, market]
  return total <= model.a[i]
## Define Objective and solve ##
# cost define objective function
# cost .. z =e= sum((i,j), c(i,j)*x(i,j)) ;
# Model transport /all/ ;
# Solve transport using lp minimizing z ;
def objective_rule(model):
  """Objective: total transport cost = sum over all routes of
  per-case cost c(i,j) times shipped quantity x(i,j)."""
  route_costs = (model.c[plant, market] * model.x[plant, market]
                 for plant in model.i for market in model.j)
  return sum(route_costs)
model.objective = Objective(rule=objective_rule, sense=minimize, doc='Define objective function')
def objective_rule(model):
  """Explicit-loop equivalent of the objective: accumulate cost times
  quantity over every (plant, market) pair."""
  total_cost = 0.0
  for plant in model.i:
    for market in model.j:
      route_cost = model.c[plant, market] * model.x[plant, market]
      total_cost = total_cost + route_cost
  return total_cost
## Display of the output ##
# Display x.l, x.m ;
def pyomo_postprocess(options=None, instance=None, results=None):
  """Display the optimal shipment quantities x after a solve.

  The signature matches the pyomo command-line postprocess hook
  (options, instance, results).  NOTE(review): the body reads the
  module-level ``model`` rather than the ``instance`` argument --
  presumably they are the same object here; confirm before reuse.
  """
  model.x.display()
# This is an optional code path that allows the script to be run outside of
# pyomo command-line. For example: python transport.py
if __name__ == '__main__':
# This emulates what the pyomo command-line tools does
from pyomo.opt import SolverFactory
import pyomo.environ
opt = SolverFactory("glpk")
results = opt.solve(model)
#sends results to stdout
results.write()
print("\nDisplaying Solution\n" + '-'*60)
pyomo_postprocess(None, model, results)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
!cat transport.py
!pyomo solve --solver=glpk transport.py
!cat results.yml
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set Definitions
Step2: Parameters
Step3: A third, powerful way to initialize a parameter is using a user-defined function.
Step4: Variables
Step5: Constrains
Step6: The above code take advantage of list comprehensions, a powerful feature of the python language that provides a concise way to loop over a list. If we take the supply_rule as example, this is actually called two times by pyomo (once for each of the elements of i). Without list comprehensions we would have had to write our function using a for loop, like
Step7: Using list comprehension is however quicker to code and more readable.
Step8: As we are here looping over two distinct sets, we can see how list comprehension really simplifies the code. The objective function could have being written without list comprehension as
Step9: Retrieving the Output
Step10: We can print model structure information with model.pprint() (“pprint” stand for “pretty print”).
Step11: Finally, if you are very lazy and want to run the script with just ./transport.py (and you are in Linux) add the following lines at the top
Step12: Complete script
Step13: Solutions
Step14: By default, the optimization results are stored in the file results.yml
|
10,224
|
<ASSISTANT_TASK:>
Python Code:
def func():
    """Return the constant 1."""
    result = 1
    return result
func()
s = 'Global Variable'
def func():
print locals()
print globals()
print globals().keys()
globals()['s']
func()
def hello(name='Jose'):
    """Return a greeting string for *name* (defaults to 'Jose')."""
    greeting = 'Hello ' + name
    return greeting
hello()
greet = hello
greet
greet()
del hello
hello()
greet()
def hello(name='Jose'):
    # Demonstrates functions defined *inside* another function: greet and
    # welcome only exist while hello() is executing (local scope)
    print 'The hello() function has been executed'
    def greet():
        return '\t This is inside the greet() function'
    def welcome():
        return "\t This is inside the welcome() function"
    # Both inner functions are callable here, inside hello()'s body
    print greet()
    print welcome()
    print "Now we are back inside the hello() function"
hello()
welcome()
def hello(name='Jose'):
    """Return one of two inner functions: greet for 'Jose', welcome otherwise.

    Demonstrates that functions are objects which can be returned from
    other functions.
    """
    def greet():
        return '\t This is inside the greet() function'

    def welcome():
        return "\t This is inside the welcome() function"

    if name != 'Jose':
        return welcome
    return greet
x = hello()
x
print x()
def hello():
    """Return a fixed greeting string."""
    message = 'Hi Jose!'
    return message
def other(func):
    # Accepts another function as an argument -- functions are objects
    # that can be passed around and called later
    print 'Other code would go here'
    print func()
def new_decorator(func):
    # Classic decorator shape: define a wrapper that runs code before and
    # after calling func, then return the wrapper in place of func
    def wrap_func():
        print "Code would be here, before executing the func"
        func()
        print "Code here will execute after the func()"
    return wrap_func
def func_needs_decorator():
print "This function is in need of a Decorator"
func_needs_decorator()
# Reassign func_needs_decorator
func_needs_decorator = new_decorator(func_needs_decorator)
func_needs_decorator()
@new_decorator
def func_needs_decorator():
print "This function is in need of a Decorator"
func_needs_decorator()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Scope Review
Step2: Remember that Python functions create a new scope, meaning the function has its own namespace to find variable names when they are mentioned within the function. We can check for local variables and global variables with the locals() and globals() functions. For example
Step3: Here we get back a dictionary of all the global variables, many of them are predefined in Python. So let's go ahead and look at the keys
Step4: Note how s is there, the Global Variable we defined as a string
Step5: Now lets run our function to check for any local variables in the func() (there shouldn't be any)
Step6: Great! Now lets continue with building out the logic of what a decorator is. Remember that in Python everything is an object. That means functions are objects which can be assigned labels and passed into other functions. Lets start with some simple examples
Step7: Assign a label to the function. Note that we are not using parentheses here because we are not calling the function hello; instead we are just putting it into the greet variable.
Step8: This assignment is not attached to the original function
Step9: Functions within functions
Step10: Note how due to scope, the welcome() function is not defined outside of the hello() function. Now lets learn about returning functions from within functions
Step11: Now let's see what function is returned if we set x = hello(); note how the closed parentheses mean that name has been defined as Jose.
Step12: Great! Now we can see how x is pointing to the greet function inside of the hello function.
Step13: Lets take a quick look at the code again.
Step14: Great! Note how we can pass the functions as objects and then use them within other functions. Now we can get started with writing our first decorator
Step15: So what just happened here? A decorator simple wrapped the function and modified its behaviour. Now lets understand how we can rewrite this code using the @ symbol, which is what Python uses for Decorators
|
10,225
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plot
from ipywidgets import interactive
import ipywidgets as widgets
import math
from pulp import *
%matplotlib inline
# Fit coefficients for the closed-form J-function approximation.
H1 = 0.3073
H2 = 0.8935
H3 = 1.1064

def J_fun(mu):
    """Approximate mutual information J(mu) for a Gaussian LLR of mean mu."""
    exponent = -H1 * (2 * mu) ** H2
    return (1 - 2 ** exponent) ** H3

def invJ_fun(I):
    """Inverse of J_fun; saturates at mu = 100 for I numerically equal to 1."""
    if I > (1 - 1e-10):
        # J is essentially 1 here; cap the mean to avoid log2(0) below.
        return 100
    inner = -(1 / H1) * np.log2(1 - I ** (1 / H3))
    return 0.5 * inner ** (1 / H2)
def find_best_lambda(mu_c, v_max, dc):
    """Solve the degree-distribution LP for an LDPC variable-node polynomial.

    Maximizes sum(lambda_i / i) (i.e. the design rate) subject to the EXIT-chart
    fixed-point condition for channel quality mu_c, a regular check-node degree
    dc, and maximum variable-node degree v_max.

    Returns the list of lambda coefficients, or [] if the LP is infeasible.
    NOTE(review): index j appears to correspond to degree v_max - j
    (highest degree first) — confirm against the caller's np.poly1d usage.
    """
    # quantization of EXIT chart
    D = 500
    I_range = np.arange(0, D, 1)/D
    # Linear Programming model, maximize target expression
    model = pulp.LpProblem("Finding best lambda problem", pulp.LpMaximize)
    # definition of variables, v_max entries \lambda_i that are between 0 and 1 (implicit declaration of constraint 2)
    v_lambda = pulp.LpVariable.dicts("lambda", range(v_max),0,1)
    # objective function: weights 1/i for each degree
    cv = 1/np.arange(v_max,0,-1)
    model += pulp.lpSum(v_lambda[i]*cv[i] for i in range(v_max))
    # constraints
    # constraint 1, no variable nodes of degree 1
    model += v_lambda[v_max-1] == 0
    # constraint 3, sum of lambda_i must be 1
    model += pulp.lpSum(v_lambda[i] for i in range(v_max))==1
    # constraints 4, fixed point condition for all the discrete xi values (a total number of D, for each \xi)
    for myI in I_range:
        model += pulp.lpSum(v_lambda[j] * J_fun(mu_c + (v_max-1-j)*invJ_fun(myI)) for j in range(v_max)) - 1 + J_fun(1/(dc-1)*invJ_fun(1-myI)) >= 0
    # constraint 5, stability condition
    model += v_lambda[v_max-2] <= np.exp(mu_c/4)/(dc-1)
    model.solve()
    # pulp status 1 means "Optimal"; anything else is treated as infeasible
    if model.status != 1:
        r_lambda = []
    else:
        r_lambda = [v_lambda[i].varValue for i in range(v_max)]
    return r_lambda
best_lambda = find_best_lambda(3.8086, 16, 14)
print(np.poly1d(best_lambda, variable='Z'))
def best_lambda_interactive(mu_c, v_max, dc):
    """Run the degree-distribution LP and visualize the resulting EXIT chart.

    Prints the optimized lambda polynomial and design rate, then plots the
    variable-node and check-node EXIT curves (the "tunnel" between them must
    stay open for decoding to converge).
    """
    # get lambda and rho polynomial from optimization and from c_avg, respectively
    p_lambda = find_best_lambda(mu_c, v_max, dc)
    # if optimization successful, compute rate and show plot
    if not p_lambda:
        print('Optimization infeasible, no solution found')
    else:
        # design rate r_d = 1 - (1/dc) / integral_0^1 lambda(Z) dZ
        design_rate = 1 - 1/(dc * np.polyval(np.polyint(p_lambda),1))
        if design_rate <= 0:
            print('Optimization feasible, but no code with positive rate found')
        else:
            print("Lambda polynomial:")
            print(np.poly1d(p_lambda, variable='Z'))
            print("Design rate r_d = %1.3f" % design_rate)
            # Plot EXIT-Chart
            print("EXIT Chart:")
            plot.figure(3)
            x = np.linspace(0, 1, num=100)
            # variable-node curve (purple) and check-node curve (green, axes swapped)
            y_v = [np.sum([p_lambda[j] * J_fun(mu_c + (v_max-1-j)*invJ_fun(xv)) for j in range(v_max)]) for xv in x]
            y_c = [1-J_fun((dc-1)*invJ_fun(1-xv)) for xv in x]
            plot.plot(x, y_v, '#7030A0')
            plot.plot(y_c, x, '#008000')
            plot.axis('equal')
            plot.gca().set_aspect('equal', adjustable='box')
            plot.xlim(0,1)
            plot.ylim(0,1)
            plot.grid()
            plot.show()

# Interactive widget: sliders for channel quality mu_c, max variable-node
# degree, and (regular) check-node degree.
interactive_plot = interactive(best_lambda_interactive, \
                               mu_c=widgets.FloatSlider(min=0.5,max=8,step=0.01,value=3, continuous_update=False, description=r'\(\mu_c\)',layout=widgets.Layout(width='50%')), \
                               v_max = widgets.IntSlider(min=3, max=20, step=1, value=16, continuous_update=False, description=r'\(d_{\mathtt{v},\max}\)'), \
                               dc = widgets.IntSlider(min=3,max=20,step=1,value=4, continuous_update=False, description=r'\(d_{\mathtt{c}}\)'))
output = interactive_plot.children[-1]
output.layout.height = '400px'
interactive_plot
def find_best_rate(mu_c, dv_max, dc_max):
    """Sweep check-node degrees 3..dc_max, returning the best achievable rate.

    For each dc the degree-distribution LP is solved; infeasible or
    negative-rate solutions contribute rate 0. Prints and plots the sweep and
    returns the largest design rate found.
    """
    c_range = np.arange(3, dc_max+1)
    rates = np.zeros_like(c_range,dtype=float)
    # loop over all c_avg, add progress bar
    f = widgets.FloatProgress(min=0, max=np.size(c_range))
    display(f)
    for index,dc in enumerate(c_range):
        f.value += 1
        p_lambda = find_best_lambda(mu_c, dv_max, dc)
        if p_lambda:
            design_rate = 1 - 1/(dc * np.polyval(np.polyint(p_lambda),1))
            if design_rate >= 0:
                rates[index] = design_rate
    # find largest rate (re-solve the LP for the winning dc to recover lambda)
    largest_rate_index = np.argmax(rates)
    best_lambda = find_best_lambda(mu_c, dv_max, c_range[largest_rate_index])
    print("Found best code of rate %1.3f for average check node degree of %1.2f" % (rates[largest_rate_index], c_range[largest_rate_index]))
    print("Corresponding lambda polynomial")
    print(np.poly1d(best_lambda, variable='Z'))
    # Plot curve with all obtained results
    plot.figure(4, figsize=(10,3))
    plot.plot(c_range, rates, 'b--s',color=(0, 0.59, 0.51))
    plot.plot(c_range[largest_rate_index], rates[largest_rate_index], 'rs')
    plot.xlim(3, dc_max)
    plot.xticks(range(3,dc_max+1))
    plot.ylim(0, 1)
    plot.xlabel('$d_{\mathtt{c}}$')
    plot.ylabel('design rate $r_d$')
    plot.grid()
    plot.show()
    return rates[largest_rate_index]

# Interactive widget wrapping the rate sweep above.
interactive_optim = interactive(find_best_rate, \
                                mu_c=widgets.FloatSlider(min=0.1,max=10,step=0.01,value=2, continuous_update=False, description=r'\(\mu_c\)',layout=widgets.Layout(width='50%')), \
                                dv_max = widgets.IntSlider(min=3, max=20, step=1, value=16, continuous_update=False, description=r'\(d_{\mathtt{v},\max}\)'), \
                                dc_max = widgets.IntSlider(min=3, max=40, step=1, value=22, continuous_update=False, description=r'\(d_{\mathtt{c},\max}\)'))
output = interactive_optim.children[-1]
output.layout.height = '400px'
interactive_optim
# Bisection search over the channel quality mu_c for the smallest mu_c whose
# best achievable design rate still reaches target_rate.
target_rate = 0.7
dv_max = 16
dc_max = 22
T_Delta = 0.01   # stop once the search interval is narrower than this
mu_c = 10        # initial channel quality (upper end of the search)
Delta_mu = 10    # current search-interval width, halved each iteration
while Delta_mu >= T_Delta:
    print('Running optimization for mu_c = %1.5f, corresponding to Es/N0 = %1.2f dB' % (mu_c, 10*np.log10(mu_c/4)))
    rate = find_best_rate(mu_c, dv_max, dc_max)
    # Rate is monotone in mu_c: move down if the target is exceeded, up otherwise.
    if rate > target_rate:
        mu_c = mu_c - Delta_mu / 2
    else:
        mu_c = mu_c + Delta_mu / 2
    Delta_mu = Delta_mu / 2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Approximation of the J-function taken from [1] with
Step2: The following function solves the optimization problem that returns the best $\lambda(Z)$ for a given BI-AWGN channel quality $E_s/N_0$, corresponding to a $\mu_c = 4\frac{E_s}{N_0}$, for a regular check node degree $d_{\mathtt{c}}$, and for a maximum variable node degree $d_{\mathtt{v},\max}$. This optimization problem is derived in the lecture as
Step3: As an example, we consider the case of optimization carried out in the lecture after 10 iterations, where we have $\mu_c = 3.8086$ and $d_{\mathtt{c}} = 14$ with $d_{\mathtt{v},\max}=16$
Step4: In the following, we provide an interactive widget that allows you to choose the parameters of the optimization yourself and get the best possible $\lambda(Z)$. Additionally, the EXIT chart is plotted to visualize the good fit of the obtained degree distribution.
Step5: Now, we carry out the optimization over a wide range of $d_{\mathtt{c},\text{avg}}$ values for a given $\epsilon$ and find the largest possible rate.
Step6: Running binary search to find code with a given target rate for the AWGN channel
|
10,226
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
import pyquickhelper
params={"blob_storage":"",
"password1":"",
"hadoop_server":"",
"password2":"",
"username":""}
pyquickhelper.ipythonhelper.open_html_form(params=params,title="server + hadoop + credentials", key_save="blobhp")
import pyensae
%load_ext pyensae
blobstorage = blobhp["blob_storage"]
blobpassword = blobhp["password1"]
hadoop_server = blobhp["hadoop_server"]
hadoop_password = blobhp["password2"]
username = blobhp["username"]
client, bs = %hd_open
client, bs
with open("DataTEST.txt", "w") as f :
f.write("1"+"\t"+"2"+"\n"+"1"+"\t"+"4"+"\n"+"2"+"\t"+"3"+"\n"+"2"+"\t"+"5"+"\n"+"3"+"\t"+"4"+"\n"+"4"+"\t"+"5"+"\n"+"5"+"\t"+"3"+"\n"+"5"+"\t"+"1"+"\n"+"5"+"\t"+"2")
import pandas
df = pandas.read_csv("DataTEST.txt", sep="\t",names=["Frm","To"])
df
%blob_up DataTEST.txt /$PSEUDO/Data/DataTEST.txt
%blob_ls /$PSEUDO/Data/
pyensae.download_data("web-Google.txt.gz", url="http://snap.stanford.edu/data/")
%head web-Google.txt
with open("web-Google.txt", "r") as f:
with open("DataGoogle.txt", "w") as g:
for line in f:
if not line.startswith("#"):
g.write(line)
%head DataGoogle.txt
%blob_up DataGoogle.txt /$PSEUDO/Data/DataGoogle.txt
%blob_ls /$PSEUDO/Data/
%%PIG Creation_Graph.pig
Arcs = LOAD '$CONTAINER/$PSEUDO/Data/$path'
USING PigStorage('\t')
AS (frm:int,to:int);
GrSort = GROUP Arcs BY frm;
deg_sort = FOREACH GrSort
GENERATE COUNT(Arcs) AS degs, Arcs , group AS ID;
GrEntr = GROUP Arcs BY to;
GrFin= JOIN deg_sort BY ID,
GrEntr BY group;
N = FOREACH (group GrSort ALL)
GENERATE COUNT(GrSort);
Pr = FOREACH GrFin
GENERATE deg_sort::ID AS ID , (float) 1 / (float)N.$0 AS PageRank;
PageRank = JOIN GrFin BY deg_sort::ID,
Pr BY ID;
STORE PageRank
INTO '$CONTAINER/$PSEUDO/Projet/SortTest.txt'
USING PigStorage('\t') ;
client.pig_submit(bs,
client.account_name,
"Creation_Graph.pig",
params=dict(path="DataTEST.txt"),
stop_on_failure=True)
st = %hd_job_status job_1435385350894_0001
st["id"],st["percentComplete"],st["status"]["jobComplete"]
%tail_stderr job_1435385350894_0001 10
%%PIG iteration.pig
gr = LOAD '$CONTAINER/$PSEUDO/Projet/SortTest.txt'
USING PigStorage('\t')
AS (DegS:long,Asort:{(frm: int,to: int)},Noeud:int,Noeud2:int,Aent:{(frm: int,to: int)},ID: int,PageRank: float);
Arcs = LOAD '$CONTAINER/$PSEUDO/Data/DataTEST.txt'
USING PigStorage('\t')
AS (frm:int,to:int);
Graph = FOREACH gr
GENERATE Noeud , DegS, PageRank AS Pinit, PageRank, PageRank/ (float) DegS AS Ratio;
DEFINE my_macro(G,A,ALP) RETURNS S {
Gi= FOREACH $G GENERATE Noeud , Ratio;
GrEntr = JOIN $A BY frm , Gi BY Noeud ;
Te = GROUP GrEntr BY to;
so = FOREACH Te GENERATE SUM(GrEntr.Ratio) AS Pr, group AS ID;
tu = JOIN $G BY Noeud, so BY ID;
sort = FOREACH tu GENERATE Noeud , DegS, Pinit, $ALP*Pinit+(1-$ALP)*Pr AS PageRank;
$S = FOREACH sort GENERATE Noeud , DegS, Pinit, PageRank, PageRank/ (float) DegS AS Ratio;
}
Ite1 = my_macro(Graph,Arcs,$alpha);
Ite2 = my_macro(Ite1,Arcs,$alpha);
Ite3 = my_macro(Ite2,Arcs,$alpha);
Ite4 = my_macro(Ite3,Arcs,$alpha);
Ite5 = my_macro(Ite4,Arcs,$alpha);
Ite6 = my_macro(Ite5,Arcs,$alpha);
Ite7 = my_macro(Ite6,Arcs,$alpha);
Ite8 = my_macro(Ite7,Arcs,$alpha);
Dump Ite1;
dump Ite8;
jid = client.pig_submit(bs,
client.account_name,
"iteration.pig",
params=dict(alpha="0"),
stop_on_failure=True )
jid
st = %hd_job_status job_1435385350894_0006
st["id"],st["percentComplete"],st["status"]["jobComplete"]
%tail_stderr job_1435385350894_0006 20
%%PIG Creation_Graph2.pig
Arcs = LOAD '$CONTAINER/$PSEUDO/Data/$path'
USING PigStorage('\t')
AS (frm:int,to:int);
GrSort = GROUP Arcs BY frm;
deg_sort = FOREACH GrSort
GENERATE COUNT(Arcs) AS degs, Arcs , group AS ID;
GrEntr = GROUP Arcs BY to;
GrFin = JOIN deg_sort BY ID,
GrEntr BY group;
N = FOREACH (GROUP GrSort ALL)
GENERATE COUNT(GrSort);
Pr = FOREACH GrFin
GENERATE deg_sort::ID AS ID , (float) 1 / (float)N.$0 AS PageRank;
PageRank = JOIN GrFin BY deg_sort::ID, Pr BY ID;
STORE PageRank
INTO '$CONTAINER/$PSEUDO/Projet/SortGoogle.txt'
USING PigStorage('\t') ;
client.pig_submit(bs, client.account_name, "Creation_Graph2.pig", params=dict(path="DataGoogle.txt"), stop_on_failure=True )
st = %hd_job_status job_1435385350894_0037
st["id"],st["percentComplete"],st["status"]["jobComplete"]
%tail_stderr job_1435385350894_0037 20
%%PIG iteration2.pig
gr = LOAD '$CONTAINER/$PSEUDO/Projet/SortGoogle.txt'
USING PigStorage('\t')
AS (DegS:long,Asort:{(frm: int,to: int)},Noeud:int,Noeud2:int,Aent:{(frm: int,to: int)},ID: int,PageRank: float);
Arcs = LOAD '$CONTAINER/$PSEUDO/Data/DataGoogle.txt'
USING PigStorage('\t')
AS (frm:int,to:int);
Graph = FOREACH gr
GENERATE Noeud , DegS, PageRank AS Pinit, PageRank, PageRank/ (float) DegS AS Ratio;
DEFINE my_macro(G,A,ALP) RETURNS S {
Gi= FOREACH $G GENERATE Noeud , Ratio;
GrEntr = JOIN $A by frm , Gi by Noeud ;
Te = GROUP GrEntr by to;
so = FOREACH Te generate SUM(GrEntr.Ratio) AS Pr, group AS ID;
tu = JOIN $G by Noeud, so by ID;
sort = FOREACH tu GENERATE Noeud , DegS, Pinit, $ALP*Pinit+(1-$ALP)*Pr AS PageRank;
$S = FOREACH sort GENERATE Noeud , DegS, Pinit, PageRank, PageRank/ (float) DegS AS Ratio;
}
Ite1 = my_macro(Graph,Arcs,$alpha);
Ite2 = my_macro(Ite1,Arcs,$alpha);
Ite3 = my_macro(Ite2,Arcs,$alpha);
Ite4 = my_macro(Ite3,Arcs,$alpha);
Ite5 = my_macro(Ite4,Arcs,$alpha);
Ite6 = my_macro(Ite5,Arcs,$alpha);
Ite7 = my_macro(Ite6,Arcs,$alpha);
Ite8 = my_macro(Ite7,Arcs,$alpha);
Dump Ite1;
dump Ite8;
STORE Ite8 INTO '$CONTAINER/$PSEUDO/Projet/PageRank.txt' USING PigStorage('\t') ;
client.pig_submit(bs,
client.account_name,
"iteration2.pig",
params=dict(alpha="0.5"),
stop_on_failure=True )
st = %hd_job_status job_1435385350894_0042
st["id"],st["percentComplete"],st["status"]["jobComplete"]
%tail_stderr job_1435385350894_0042 20
%blob_downmerge /$PSEUDO/Projet/PageRank.txt PageRank.txt
import pandas
import matplotlib as plt
plt.style.use('ggplot')
df = pandas.read_csv("PageRank.txt", sep="\t",names=["Node","OutDeg","Pinit", "PageRank", "k"])
df
df['PageRank'].hist(bins=100, range=(0,0.000005))
df.sort_values("PageRank",ascending=False).head()
%blob_close
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Connexion au cluster
Step2: Création d'un petit jeu de données
Step3: On importe ce graphe
Step4: On vérifie que les données ont bien été chargées
Step5: Récupération de données réelles
Step6: On filtre les premières lignes.
Step7: Algorithme Page Rank
Step8: Itérations
Step9: On peut alors s'intéresser aux vraies données !
|
10,227
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
rng = np.random.RandomState(1)
x = 10 * rng.rand(50)
y = 2 * x - 5 + rng.randn(50)
plt.scatter(x, y);
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=True)
model.fit(x[:, np.newaxis], y)
xfit = np.linspace(0, 10, 1000)
ytest = 2*xfit -5
yfit = model.predict(xfit[:, np.newaxis])
plt.scatter(x, y)
plt.plot(xfit, yfit);
print("Model slope: ", model.coef_[0])
print("Model intercept:", model.intercept_)
# Root mean square error 均方根误差,亦称标准误差
# https://en.wikipedia.org/wiki/Root-mean-square_deviation
def rmse(y_test, y_pred):
    """Root-mean-square error between true values and predictions."""
    squared_errors = (y_test - y_pred) ** 2
    return np.sqrt(np.mean(squared_errors))
# R square
def R2(y_test, y_pred):
    """Coefficient of determination: 1 - SS_res / SS_tot."""
    ss_res = np.sum((y_pred - y_test) ** 2)
    ss_tot = np.sum((y_test - np.mean(y_test)) ** 2)
    return 1 - ss_res / ss_tot
# https://en.wikipedia.org/wiki/Coefficient_of_determination
print('RMSE: %.4f' % rmse(ytest, yfit))
print('R2 score: %.4f' % R2(ytest, yfit))
from sklearn.metrics import mean_squared_error, r2_score, explained_variance_score
print('RMSE: %.4f' % mean_squared_error(ytest, yfit) ** 0.5)
print('R2 score: %.4f' % r2_score(ytest, yfit))
print('Variance score: %.4f' % explained_variance_score(ytest, yfit))
rng = np.random.RandomState(1)
X = 10 * rng.rand(100, 3)
y = 0.5 + np.dot(X, [1.5, -2., 1.])
# $y$ is constructed from three random $x$ values
model.fit(X, y)
print(model.intercept_)
print(model.coef_)
from sklearn.preprocessing import PolynomialFeatures
x = np.array([2, 3, 4])
poly = PolynomialFeatures(3, include_bias=False)
poly.fit_transform(x[:, None])
from sklearn.pipeline import make_pipeline
poly_model = make_pipeline(PolynomialFeatures(7),
LinearRegression())
rng = np.random.RandomState(1)
x = 10 * rng.rand(50)
y = np.sin(x) + 0.1 * rng.randn(50)
xfit = np.linspace(0, 10, 1000)
poly_model.fit(x[:, np.newaxis], y)
yfit = poly_model.predict(xfit[:, np.newaxis])
plt.scatter(x, y)
plt.plot(xfit, yfit);
from sklearn.base import BaseEstimator, TransformerMixin
class GaussianFeatures(BaseEstimator, TransformerMixin):
    """Uniformly spaced Gaussian features for one-dimensional input.

    Maps a column vector X of shape (n_samples, 1) to an (n_samples, N)
    matrix of Gaussian basis-function activations whose N centers are
    spread evenly over the range of the training data.

    (Fix: the class and method docstrings lost their triple quotes in the
    source dump, which made the class body a syntax error.)
    """

    def __init__(self, N, sigma_factor=2.0):
        # N: number of Gaussian centers; sigma_factor: basis width relative
        # to the spacing between adjacent centers.
        self.N = N
        self.sigma_factor = sigma_factor

    @staticmethod
    def _gauss_basis(x, mu, sigma, axis=None):
        """Evaluate exp(-0.5 * sum(((x - mu) / sigma) ** 2, axis))."""
        arg = (x - mu) / sigma
        return np.exp(-0.5 * np.sum(arg ** 2, axis))

    def fit(self, X, y=None):
        # create N centers spread along the data range
        self.mu_ = np.linspace(X.min(), X.max(), self.N)
        self.sigma_ = self.sigma_factor * (self.mu_[1] - self.mu_[0])
        return self

    def transform(self, X):
        # Broadcast (n_samples, n_features, 1) against the N centers so the
        # sum over axis=1 yields an (n_samples, N) design matrix.
        return self._gauss_basis(X[:, :, np.newaxis], self.mu_,
                                 self.sigma_, axis=1)
rng = np.random.RandomState(1)
x = 10 * rng.rand(50)
y = np.sin(x) + 0.1 * rng.randn(50)
xfit = np.linspace(0, 10, 1000)
gauss_model = make_pipeline(GaussianFeatures(20),
LinearRegression())
gauss_model.fit(x[:, np.newaxis], y)
yfit = gauss_model.predict(xfit[:, np.newaxis])
plt.scatter(x, y)
plt.plot(xfit, yfit)
plt.xlim(0, 10);
model = make_pipeline(GaussianFeatures(30),
LinearRegression())
model.fit(x[:, np.newaxis], y)
plt.scatter(x, y)
plt.plot(xfit, model.predict(xfit[:, np.newaxis]))
plt.xlim(0, 10)
plt.ylim(-5, 1.5);
def basis_plot(model, title=None):
    """Fit `model` to the notebook's global (x, y) data and plot it.

    Top panel: data scatter plus the model's prediction over the global
    `xfit` grid. Bottom panel: the fitted coefficient of each basis function
    against its center location.

    NOTE(review): relies on module-level x, y, xfit and on `model` being a
    Pipeline of (GaussianFeatures, linear model) — confirm at call sites.
    """
    fig, ax = plt.subplots(2, sharex=True)
    model.fit(x[:, np.newaxis], y)
    ax[0].scatter(x, y)
    ax[0].plot(xfit, model.predict(xfit[:, np.newaxis]))
    ax[0].set(xlabel='x', ylabel='y', ylim=(-5, 1.5))
    if title:
        ax[0].set_title(title)
    # steps[0][1] is the basis transformer (centers mu_),
    # steps[1][1] is the fitted linear model (coef_).
    ax[1].plot(model.steps[0][1].mu_,
               model.steps[1][1].coef_)
    ax[1].set(xlabel='basis location',
              ylabel='coefficient',
              xlim=(0, 10))
model = make_pipeline(GaussianFeatures(30), LinearRegression())
basis_plot(model)
from sklearn.linear_model import Ridge
model = make_pipeline(GaussianFeatures(30), Ridge(alpha=0.1))
basis_plot(model, title='Ridge Regression')
from sklearn.linear_model import Lasso
model = make_pipeline(GaussianFeatures(30), Lasso(alpha=0.001))
basis_plot(model, title='Lasso Regression')
# !curl -o FremontBridge.csv https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD
import pandas as pd
counts = pd.read_csv('data/Fremont_Bridge.csv', index_col='Date', parse_dates=True)
weather = pd.read_csv('data/BicycleWeather.csv', index_col='DATE', parse_dates=True)
daily = counts.resample('d').sum()
daily['Total'] = daily.sum(axis=1)
daily = daily[['Total']] # remove other columns
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
for i in range(7):
daily[days[i]] = (daily.index.dayofweek == i).astype(float)
from pandas.tseries.holiday import USFederalHolidayCalendar
cal = USFederalHolidayCalendar()
holidays = cal.holidays('2012', '2016')
daily = daily.join(pd.Series(1, index=holidays, name='holiday'))
daily['holiday'].fillna(0, inplace=True)
def hours_of_daylight(date, axis=23.44, latitude=47.61):
    """Compute the hours of daylight for the given date.

    Standard astronomical day-length formula; defaults are Earth's axial
    tilt (23.44 deg) and Seattle's latitude (47.61 deg N).

    (Fixes: the docstring lost its triple quotes in the source dump, and
    `pd.datetime` — removed in pandas 2.0 — is replaced by pd.Timestamp.)
    """
    # Days elapsed since the 2000 winter solstice.
    days = (date - pd.Timestamp(2000, 12, 21)).days
    m = (1. - np.tan(np.radians(latitude))
         * np.tan(np.radians(axis) * np.cos(days * 2 * np.pi / 365.25)))
    # Clipping handles polar day/night, where the arccos argument would
    # leave [-1, 1].
    return 24. * np.degrees(np.arccos(1 - np.clip(m, 0, 2))) / 180.
daily['daylight_hrs'] = list(map(hours_of_daylight, daily.index))
daily[['daylight_hrs']].plot()
plt.ylim(8, 17)
# temperatures are in 1/10 deg C; convert to C
weather['TMIN'] /= 10
weather['TMAX'] /= 10
weather['Temp (C)'] = 0.5 * (weather['TMIN'] + weather['TMAX'])
# precip is in 1/10 mm; convert to inches
weather['PRCP'] /= 254
weather['dry day'] = (weather['PRCP'] == 0).astype(int)
daily = daily.join(weather[['PRCP', 'Temp (C)', 'dry day']])
daily['annual'] = (daily.index - daily.index[0]).days / 365.
daily.head()
# Drop any rows with null values
daily.dropna(axis=0, how='any', inplace=True)
column_names = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun', 'holiday',
'daylight_hrs', 'PRCP', 'dry day', 'Temp (C)', 'annual']
X = daily[column_names]
y = daily['Total']
model = LinearRegression(fit_intercept=False)
model.fit(X, y)
daily['predicted'] = model.predict(X)
daily[['Total', 'predicted']].plot(alpha=0.5);
params = pd.Series(model.coef_, index=X.columns)
params
from sklearn.utils import resample
np.random.seed(1)
err = np.std([model.fit(*resample(X, y)).coef_
for i in range(1000)], 0)
print(pd.DataFrame({'effect': params.round(0),
'error': err.round(0)}))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple Linear Regression
Step2: We can use Scikit-Learn's LinearRegression estimator to fit this data and construct the best-fit line
Step3: The slope and intercept of the data are contained in the model's fit parameters, which in Scikit-Learn are always marked by a trailing underscore.
Step4: We see that the results are very close to the inputs, as we might hope.
Step5: The LinearRegression estimator is much more capable than this, however—in addition to simple straight-line fits, it can also handle multidimensional linear models of the form
Step6: we can use the single LinearRegression estimator to fit lines, planes, or hyperplanes to our data.
Step7: Basis Function Regression 基函数回归
Step8: We see here that the transformer has converted our one-dimensional array into a three-dimensional array by taking the exponent of each value.
Step9: With this transform in place, we can use the linear model to fit much more complicated relationships between $x$ and $y$.
Step11: Gaussian basis functions 高斯基函数
Step12: There is nothing magic about polynomial basis functions
Step13: With the data projected to the 30-dimensional basis, the model has far too much flexibility and goes to extreme values between locations where it is constrained by data.
Step14: This is typical over-fitting behavior when basis functions overlap
Step15: The $\alpha$ parameter is essentially a knob controlling the complexity of the resulting model.
Step16: With the lasso regression penalty, the majority of the coefficients are exactly zero,
Step17: Next we will compute the total daily bicycle traffic, and put this in its own dataframe
Step18: We saw previously that the patterns of use generally vary from day to day; let's account for this in our data by adding binary columns that indicate the day of the week
Step19: Similarly, we might expect riders to behave differently on holidays; let's add an indicator of this as well
Step21: We also might suspect that the hours of daylight would affect how many people ride; let's use the standard astronomical calculation to add this information
Step22: We can also add the average temperature and total precipitation to the data.
Step23: Finally, let's add a counter that increases from day 1, and measures how many years have passed.
Step24: Now our data is in order, and we can take a look at it
Step25: With this in place, we can choose the columns to use, and fit a linear regression model to our data.
Step26: Finally, we can compare the total and predicted bicycle traffic visually
Step27: It is evident that we have missed some key features, especially during the summer time.
Step28: These numbers are difficult to interpret without some measure of their uncertainty.
Step29: With these errors estimated, let's again look at the results
|
10,228
|
<ASSISTANT_TASK:>
Python Code:
# Import spaCy and load the language library
import spacy
nlp = spacy.load('en_core_web_sm')
# Create a Doc object
doc = nlp(u'Tesla is looking at buying U.S. startup for $6 million')
# Print each token separately: raw text, coarse POS tag, syntactic dependency
for token in doc:
    print(token.text, token.pos_, token.dep_)
# Inspect the processing pipeline (component objects and their names).
nlp.pipeline
nlp.pipe_names
doc2 = nlp(u"Tesla isn't looking into startups anymore.")
for token in doc2:
    print(token.text, token.pos_, token.dep_)
# Docs support indexing and slicing like sequences of tokens.
doc2
doc2[0]
type(doc2)
doc2[0].pos_
doc2[0].dep_
# spacy.explain() expands tag abbreviations to human-readable descriptions.
spacy.explain('PROPN')
spacy.explain('nsubj')
# Lemmas (the base form of the word):
print(doc2[4].text)
print(doc2[4].lemma_)
# Simple Parts-of-Speech & Detailed Tags:
print(doc2[4].pos_)
print(doc2[4].tag_ + ' / ' + spacy.explain(doc2[4].tag_))
# Word Shapes:
print(doc2[0].text+': '+doc2[0].shape_)
print(doc[5].text+' : '+doc[5].shape_)
# Boolean Values:
print(doc2[0].is_alpha)
print(doc2[0].is_stop)
doc3 = nlp(u'Although commmonly attributed to John Lennon from his song "Beautiful Boy", \
the phrase "Life is what happens to us while we are making other plans" was written by \
cartoonist Allen Saunders and published in Reader\'s Digest in 1957, when Lennon was 17.')
# Slicing a Doc yields a Span (a view into the Doc, not a copy).
life_quote = doc3[16:30]
print(life_quote)
type(life_quote)
# Sentence segmentation: iterate doc.sents, test boundaries with is_sent_start.
doc4 = nlp(u'This is the first sentence. This is another sentence. This is the last sentence.')
for sent in doc4.sents:
    print(sent)
doc4[6].is_sent_start
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This doesn't look very user-friendly, but right away we see some interesting things happen
Step2: Tokenization
Step3: Notice how isn't has been split into two tokens. spaCy recognizes both the root verb is and the negation attached to it. Notice also that both the extended whitespace and the period at the end of the sentence are assigned their own tokens.
Step4: Part-of-Speech Tagging (POS)
Step5: Dependencies
Step6: To see the full name of a tag use spacy.explain(tag)
Step7: Additional Token Attributes
Step8: Spans
Step9: In upcoming lectures we'll see how to create Span objects using Span(). This will allow us to assign additional information to the Span.
|
10,229
|
<ASSISTANT_TASK:>
Python Code:
from dolfin import *
from rbnics import *
@PullBackFormsToReferenceDomain()
@AffineShapeParametrization("data/hole_vertices_mapping.vmp")
class Hole(EllipticCoerciveProblem):
    """Elliptic coercive problem on a domain with a parametrized hole.

    The affine shape parametrization (vertex mapping in
    data/hole_vertices_mapping.vmp) is pulled back to the reference domain by
    the decorators above, so the forms below are written on the reference
    configuration.
    """

    # Default initialization of members
    def __init__(self, V, **kwargs):
        # Call the standard initialization
        EllipticCoerciveProblem.__init__(self, V, **kwargs)
        # ... and also store FEniCS data structures for assembly
        assert "subdomains" in kwargs
        assert "boundaries" in kwargs
        self.subdomains, self.boundaries = kwargs["subdomains"], kwargs["boundaries"]
        self.u = TrialFunction(V)
        self.v = TestFunction(V)
        # Integration measures carrying the subdomain / boundary markers.
        # NOTE(review): these use the module-level `subdomains`/`boundaries`
        # globals, and the two attributes are re-assigned from the globals
        # below, shadowing the kwargs-based assignment above — confirm intent.
        self.dx = Measure("dx")(subdomain_data=subdomains)
        self.ds = Measure("ds")(subdomain_data=boundaries)
        self.subdomains = subdomains
        self.boundaries = boundaries

    # Return custom problem name
    def name(self):
        return "Hole"

    # Return theta multiplicative terms of the affine expansion of the problem.
    def compute_theta(self, term):
        mu = self.mu
        if term == "a":
            # mu[2] scales the Robin-type boundary term a1 on markers 5-8.
            theta_a0 = 1.0
            theta_a1 = mu[2]
            return (theta_a0, theta_a1)
        elif term == "f":
            theta_f0 = 1.0
            return (theta_f0, )
        else:
            raise ValueError("Invalid term for compute_theta().")

    # Return forms resulting from the discretization of the affine expansion of the problem operators.
    def assemble_operator(self, term):
        u = self.u
        v = self.v
        dx = self.dx
        ds = self.ds
        if term == "a":
            # a0: stiffness term; a1: boundary mass term on markers 5-8.
            a0 = inner(grad(u), grad(v)) * dx
            a1 = inner(u, v) * ds(5) + inner(u, v) * ds(6) + inner(u, v) * ds(7) + inner(u, v) * ds(8)
            return (a0, a1)
        elif term == "f":
            # Boundary source on markers 1-4.
            f0 = v * ds(1) + v * ds(2) + v * ds(3) + v * ds(4)
            return (f0, )
        elif term == "inner_product":
            # H^1 inner product used for orthonormalization / error norms.
            x0 = u * v * dx + inner(grad(u), grad(v)) * dx
            return (x0,)
        else:
            raise ValueError("Invalid term for assemble_operator().")
# Load mesh and its subdomain / boundary markers.
mesh = Mesh("data/hole.xml")
subdomains = MeshFunction("size_t", mesh, "data/hole_physical_region.xml")
boundaries = MeshFunction("size_t", mesh, "data/hole_facet_region.xml")
# Finite element space: Lagrange P1.
V = FunctionSpace(mesh, "Lagrange", 1)
# Instantiate the parametrized problem and set its parameter box
# (two geometry parameters, one Robin coefficient).
problem = Hole(V, subdomains=subdomains, boundaries=boundaries)
mu_range = [(0.5, 1.5), (0.5, 1.5), (0.01, 1.0)]
problem.set_mu_range(mu_range)
# POD-Galerkin reduction: at most 20 basis functions or POD tolerance 1e-6,
# trained on 100 random parameter samples.
reduction_method = PODGalerkin(problem)
reduction_method.set_Nmax(20)
reduction_method.set_tolerance(1e-6)
reduction_method.initialize_training_set(100)
reduced_problem = reduction_method.offline()
# Online phase: solve the reduced problem at one parameter value and plot.
online_mu = (0.5, 0.5, 0.01)
reduced_problem.set_mu(online_mu)
reduced_solution = reduced_problem.solve()
plot(reduced_solution, reduced_problem=reduced_problem)
# Error analysis over a fresh 100-sample testing set.
reduction_method.initialize_testing_set(100)
reduction_method.error_analysis()
# 8. Perform a speedup analysis
reduction_method.initialize_testing_set(100)
reduction_method.speedup_analysis()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Affine decomposition
Step2: 4. Main program
Step3: 4.2. Create Finite Element space (Lagrange P1)
Step4: 4.3. Allocate an object of the Hole class
Step5: 4.4. Prepare reduction with a POD-Galerkin method
Step6: 4.5. Perform the offline phase
Step7: 4.6. Perform an online solve
Step8: 4.7. Perform an error analysis
Step9: 4.8. Perform a speedup analysis
|
10,230
|
<ASSISTANT_TASK:>
Python Code:
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
sns.set_style('whitegrid')
titanic = sns.load_dataset('titanic')
titanic.head()
sns.jointplot(x='fare',y='age',data=titanic)
sns.distplot(titanic['fare'],bins=30,kde=False,color='red')
sns.boxplot(x='class',y='age',data=titanic,palette='rainbow')
sns.swarmplot(x='class',y='age',data=titanic,palette='Set2')
sns.countplot(x='sex',data=titanic)
sns.heatmap(titanic.corr(),cmap='coolwarm')
plt.title('titanic.corr()')
g = sns.FacetGrid(data=titanic,col='sex')
g.map(plt.hist,'age')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Jointplot comparing fare and age
Step2: Plot the fare column as distribution
Step3: Displaying passenger and age over a boxplot
Step4: A simple count plot displying the number of passanger by sex
Step5: A heatmap showing the correlations for the entire dataset
Step6: Two histograms ploted using FacetGrid based on age and sex
|
10,231
|
<ASSISTANT_TASK:>
Python Code:
db_file = '../examples/data/clones_100.100.tab'
# dialect="excel" for CSV or XLS files
# for computational reasons, let's limit the dataset to the first 1000 sequences
X = io.load_dataframe(db_file, dialect="excel-tab")[:1000]
# turn the following off if data are real
# otherwise, assume that the "SEQUENCE_ID" field is composed as
# "[db]_[extension]_[id]_[id-true-clonotype]_[other-info]"
# See the example file for the format of the input.
X['true_clone'] = [x[3] for x in X.sequence_id.str.split('_')]
# group by junction and v genes
groups = X.groupby(["v_gene_set_str", "junc"]).groups.values()
idxs = np.array([elem[0] for elem in groups]) # take one of them
weights = np.array([len(elem) for elem in groups]) # assign its weight
n_clusters = 50
X_all = idxs.reshape(-1,1)
kmeans = MiniBatchKMeans(n_init=100, n_clusters=min(n_clusters, X_all.shape[0]))
lengths = X['junction_length'].values
kmeans.fit(lengths[idxs].reshape(-1,1))
dbscan = DBSCAN(min_samples=20, n_jobs=-1, algorithm='brute', eps=0.2,
metric=partial(distance_dataframe, X,
junction_dist=distances.StringDistance(model='ham'),
correct=True, tol=0))
dbscan_labels = np.zeros_like(kmeans.labels_).ravel()
for label in np.unique(kmeans.labels_):
idx_row = np.where(kmeans.labels_ == label)[0]
X_idx = idxs[idx_row].reshape(-1,1).astype('float64')
weights_idx = weights[idx_row]
if idx_row.size == 1:
db_labels = np.array([0])
db_labels = dbscan.fit_predict(X_idx, sample_weight=weights_idx)
if len(dbscan.core_sample_indices_) < 1:
db_labels[:] = 0
if -1 in db_labels:
# this means that DBSCAN found some IG as noise. We choose to assign to the nearest cluster
balltree = BallTree(
X_idx[dbscan.core_sample_indices_],
metric=dbscan.metric)
noise_labels = balltree.query(
X_idx[db_labels == -1], k=1, return_distance=False).ravel()
# get labels for core points, then assign to noise points based
# on balltree
dbscan_noise_labels = db_labels[
dbscan.core_sample_indices_][noise_labels]
db_labels[db_labels == -1] = dbscan_noise_labels
# hopefully, there are no noisy samples at this time
db_labels[db_labels > -1] = db_labels[db_labels > -1] + np.max(dbscan_labels) + 1
dbscan_labels[idx_row] = db_labels # + np.max(dbscan_labels) + 1
labels = dbscan_labels
# new part: put together the labels
labels_ext = np.zeros(X.shape[0], dtype=int)
labels_ext[idxs] = labels
for i, list_ in enumerate(groups):
labels_ext[list_] = labels[i]
labels = labels_ext
db_file = '../examples/data/clones_100.100.tab'
correct = True
tolerance = 0
X = io.load_dataframe(db_file)[:1000]
# turn the following off if data are real
X['true_clone'] = [x[3] for x in X.sequence_id.str.split('_')]
true_clones = LabelEncoder().fit_transform(X.true_clone.values)
ii = inference.ICINGTwoStep(
model='nt', eps=0.2, method='dbscan', verbose=True,
kmeans_params=dict(n_init=100, n_clusters=20),
dbscan_params=dict(min_samples=20, n_jobs=-1, algorithm='brute',
metric=partial(distance_dataframe, X, **dict(
junction_dist=StringDistance(model='ham'),
correct=correct, tol=tolerance))))
tic = time.time()
labels = ii.fit_predict(X)
tac = time.time() - tic
print("\nElapsed time: %.1fs" % tac)
X['icing_clones (%s)' % ('_'.join(('StringDistance', str(eps), '0', 'corr' if correct else 'nocorr',
"%.4f" % tac)))] = labels
X.to_csv(db_file.split('/')[-1] + '_icing.csv')
from sklearn import metrics
true_clones = LabelEncoder().fit_transform(X.true_clone.values)
print "FMI: %.5f" % (metrics.fowlkes_mallows_score(true_clones, labels))
print "ARI: %.5f" % (metrics.adjusted_rand_score(true_clones, labels))
print "AMI: %.5f" % (metrics.adjusted_mutual_info_score(true_clones, labels))
print "NMI: %.5f" % (metrics.normalized_mutual_info_score(true_clones, labels))
print "Hom: %.5f" % (metrics.homogeneity_score(true_clones, labels))
print "Com: %.5f" % (metrics.completeness_score(true_clones, labels))
print "Vsc: %.5f" % (metrics.v_measure_score(true_clones, labels))
labels = dbscan.fit_predict(np.arange(X.shape[0]).reshape(-1, 1))
print "FMI: %.5f" % metrics.fowlkes_mallows_score(true_clones, labels)
print "ARI: %.5f" % (metrics.adjusted_rand_score(true_clones, labels))
print "AMI: %.5f" % (metrics.adjusted_mutual_info_score(true_clones, labels))
print "NMI: %.5f" % (metrics.normalized_mutual_info_score(true_clones, labels))
print "Hom: %.5f" % (metrics.homogeneity_score(true_clones, labels))
print "Com: %.5f" % (metrics.completeness_score(true_clones, labels))
print "Vsc: %.5f" % (metrics.v_measure_score(true_clones, labels))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Preprocessing step
Step2: 2. High-level group inference
Step3: 3. Fine-grained group inference
Step4: Quickstart
Step5: If you want to save the results
Step6: How is the result?
Step7: Is it better or worse than the result with everyone at the same time?
|
10,232
|
<ASSISTANT_TASK:>
Python Code:
import pylab as pl
import casadi as ca
import casiopeia as cp
x = ca.MX.sym("x", 4)
u = ca.MX.sym("u", 1)
eps_u = ca.MX.sym("eps_u", 1)
p = ca.MX.sym("p", 3)
k_M = p[0]
c_M = p[1]
c_m = p[2]
M = 250.0
m = 50.0
p_scale = [1e3, 1e4, 1e5]
f = ca.vertcat( \
x[1], \
(p_scale[0] * k_M / m) * (x[3] - x[1]) + (p_scale[1] * c_M / m) * (x[2] - x[0]) - (p_scale[2] * c_m / m) * (x[0] - (u + eps_u)), \
x[3], \
-(p_scale[0] * k_M / M) * (x[3] - x[1]) - (p_scale[1] * c_M / M) * (x[2] - x[0]) \
)
phi = x
system = cp.system.System( \
x = x, u = u, p = p, f = f, phi = phi, eps_u = eps_u)
T = 5.0
N = 100
k_M_true = 4.0
c_M_true = 4.0
c_m_true = 1.6
p_true = [k_M_true, c_M_true, c_m_true]
time_points = pl.linspace(0, T, N+1)
u0 = 0.05
udata = u0 * pl.sin(2 * pl.pi * time_points[:-1])
simulation_true_parameters = cp.sim.Simulation( \
system = system, pdata = p_true)
sigma_u = 0.005
udata_noise = udata + sigma_u * pl.randn(*udata.shape)
# Plot controls
f = pl.figure()
ax = pl.gca()
ax.step(time_points[:-1], udata, label = "u_init")
ax.step(time_points[:-1], udata_noise, label = "u_init_noise")
ax.legend(loc = "best")
ax.set_xlabel("t")
ax.set_ylabel("u")
pl.show()
x0 = pl.zeros(x.shape)
simulation_true_parameters.run_system_simulation( \
x0 = x0, time_points = time_points, udata = udata_noise)
ydata = simulation_true_parameters.simulation_results.T
sigma_y = pl.array([0.01, 0.01, 0.01, 0.01])
ydata_noise = ydata + sigma_y * pl.randn(*ydata.shape)
# Plot simulation results
f = pl.figure()
ax = pl.gca()
ax.plot(time_points, ydata_noise[:,0], label = "x_T")
ax.plot(time_points, ydata[:,0], label = "x_T_noise")
ax.legend(loc = "best")
ax.set_xlabel("t")
ax.set_ylabel("x")
pl.show()
wv = (1.0 / sigma_y**2) * pl.ones(ydata.shape)
weps_u = (1.0 / sigma_u**2) * pl.ones(udata.shape)
pe = cp.pe.LSq(system = system, \
time_points = time_points, \
udata = udata, \
pinit = [1.0, 1.0, 1.0], \
ydata = ydata_noise, \
xinit = ydata_noise, \
wv = wv,
weps_u = weps_u,
discretization_method = "multiple_shooting")
pe.run_parameter_estimation()
pe.compute_covariance_matrix()
pe.print_estimation_results()
p_for_oed = pe.estimated_parameters
ulim = 0.05
umin = -ulim
umax = +ulim
xlim = [0.1, 0.4, 0.1, 0.4]
xmin = [-lim for lim in xlim]
xmax = [+lim for lim in xlim]
sigma_y = pl.array([0.01, 0.01, 0.01, 0.01])
sigma_u = 0.005
wv = (1.0 / sigma_y**2) * pl.ones(ydata.shape)
weps_u = (1.0 / sigma_u**2) * pl.ones(udata.shape)
doe = cp.doe.DoE(system = system, time_points = time_points, \
uinit = udata, pdata = p_for_oed, \
x0 = ydata[0,:], \
wv = wv, weps_u = weps_u, \
umin = umin, umax = umax, \
xmin = xmin, xmax = xmax)
# doe.run_experimental_design(solver_options = {"ipopt": {"linear_solver": "ma86"}})
# u_opt = doe.design_results
u_opt = pl.loadtxt("u_opt.txt")
# Plot controls
f = pl.figure()
ax = pl.gca()
ax.step(time_points[:-1], udata, label = "u_init")
ax.step(time_points[:-1], u_opt, label = "u_opt")
ax.legend(loc = "best")
ax.set_xlabel("t")
ax.set_ylabel("u")
pl.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2.) System definition
Step2: 2.) Simulation
Step3: 3.) Parameter estimation for initial experiment
|
10,233
|
<ASSISTANT_TASK:>
Python Code:
print 'Hello World!'
# this is a comment!
'''
This is technically just
a multiline string but
ususually it's used as a
multiline comment.
'''
b = True # bool
s = 'This is a string' # str
i = 4 # int
f = 4.1 # float
d = {'foo': 1, 'bar': 2} # dict
l = [3,2,1] # list
t = (1,2,3) # tuple
print d['foo']
print l[2]
print t[1]
b = None
s = None
print "Our float value is %s. Our int value is %s." % (f, i)
def add2(x):
    """Return *x* increased by two."""
    incremented = 2 + x
    return incremented
add2(10)
if i == 1 and f > 4:
print "The value of i is 1 and f is greater than 4."
elif i > 4 or f > 4:
print "i is greater than 4 or f is greateer than 4."
else:
print "Both i and f are less or equal to 4."
for i in l:
print i
while i < 10:
print i
i += 1
import graphlab
sf = graphlab.SFrame.read_csv('https://static.turi.com/datasets/coursera/toy_datasets/people-example.csv')
sf # you can view the contents
# you can explore summaries of the data
sf.show()
# you can also do this inline
graphlab.canvas.set_target('ipynb')
sf['age'].show(view='Categorical')
sf['Country']
# add a new column called "Full Name":
sf['Full Name'] = sf['First Name'] + ' ' + sf['Last Name']
sf
# You can filter finding all rows that match a logical condition
sf[sf['Full Name'] == 'Felix Brown']
# You can do math
print sf['age']
print sf['age'].mean()
print sf['age'].std()
print sf['age']*2
print sf['age']+2*sf['age']
sf['Country']
def transform_country(country):
    """Normalize a country label: map 'USA' to 'United States', pass anything else through unchanged."""
    return 'United States' if country == 'USA' else country
sf['Country'] = sf['Country'].apply(lambda cur_value: 'United States' if cur_value == 'USA' else cur_value)
sf.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In Python single line comments are started with a <b>#</b>.
Step2: Python doesn't actually have built-in support for multiline comments. However, this can be done by just creating a multiline string and not setting it equal to anything. Multiline strings are started and ended with three single quotes or three double quotes. (Single and double quotes are equivalent in Python.)
Step3: Python has several built in data types. The simple built in types are called
Step4: Python has other built in types that are compound types (i.e. types composed of other types). The most common are
Step5: Tuples are like lists except they are immutable. Strings are also immutable.
Step6: You can print the value of variable inside of strings by using the <b>%</b> operator and placing <b>%s</b> inside of the string. For example
Step7: You create a functions by using the <b>def</b> keyword. Here is an example of a function called <i>add2</i> that takes a value called <i>x</i> return the value of two added to it.
Step8: Like most programming languages, Python has <b>if</b> and <b>else</b> statements. The <b>elif</b> keyword is used for else-if statements. Unlike a lot of programming language, white space is meaningfull; the body of if-statements must be indented from its test-expression. Python doesn't use braces.
Step9: Python has two types of loops, <b>for</b> loops and <b>while</b> loops.
Step10: While-loops are executed as long as the given expression is True.
Step11: Notice the use of "+=" to increment. Unlike a lot of programming languages, Python does not have a increment or decrement operator.
Step12: Using GraphLab Create, we can easily read in comma seperated file.
Step13: SFrame basics
Step14: Suppose we just wanted to look a single column.
Step15: You can add columns.
Step16: On the countries, notice that we have two country values that mean the same thing
Step17: We could also have used a <b>lambda</b> function in the apply. Lambdas are just inline, unamed functions. Lambdas also don't have explict return statements. What the expression evaluates to will be automatically returned
|
10,234
|
<ASSISTANT_TASK:>
Python Code:
#Ejemplo de Consulta
import Consulta as C
from IPython.display import display, Markdown
display(Markdown(C.F_Mark()))
%%bash
Rscript "Estadistica_Descriptiva.R"
%%bash
python Estadistica_Inferencial.py
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ESTADÍSTICA Y ANÁLISIS
Step2: Estadística de números
|
10,235
|
<ASSISTANT_TASK:>
Python Code:
data = \
'''
<html>
<head>
<meta charset="utf-8">
<title>Homepage of Prof. Dr. Karl Stroetmann</title>
<link type="text/css" rel="stylesheet" href="style.css" />
<link href="http://fonts.googleapis.com/css?family=Rochester&subset=latin,latin-ext"
rel="stylesheet" type="text/css">
<link href="http://fonts.googleapis.com/css?family=Pacifico&subset=latin,latin-ext"
rel="stylesheet" type="text/css">
<link href="http://fonts.googleapis.com/css?family=Cabin+Sketch&subset=latin,latin-ext" rel="stylesheet" type="text/css">
<link href="http://fonts.googleapis.com/css?family=Sacramento" rel="stylesheet" type="text/css">
</head>
<body>
<hr/>
<div id="table">
<header>
<h1 id="name">Prof. Dr. Karl Stroetmann</h1>
</header>
<div id="row1">
<div class="right">
<a id="dhbw" href="http://www.ba-stuttgart.de">Duale Hochschule Baden-Württemberg</a>
<br/>Coblitzallee 1-9
<br/>68163 Mannheim
<br/>Germany
<br>
<br/>Office: Raum 344B
<br/>Phone: +49 621 4105-1376
<br/>Fax: +49 621 4105-1194
<br/>Skype: karlstroetmann
</div>
<div id="links">
<strong class="some">Some links:</strong>
<ul class="inlink">
<li class="inlink">
My <a class="inlink" href="https://github.com/karlstroetmann?tab=repositories">lecture notes</a>,
as well as the programs presented in class, can be found
at <br>
<a class="inlink" href="https://github.com/karlstroetmann?tab=repositories">https://github.com/karlstroetmann</a>.
</li>
<li class="inlink">Most of my papers can be found at <a class="inlink" href="https://www.researchgate.net/">researchgate.net</a>.</li>
<li class="inlink">The programming language SetlX can be downloaded at <br>
<a href="http://randoom.org/Software/SetlX"><tt class="inlink">http://randoom.org/Software/SetlX</tt></a>.
</li>
</ul>
</div>
</div>
</div>
<div id="intro">
As I am getting old and wise, I have to accept the limits of
my own capabilities. I have condensed these deep philosophical
insights into a most beautiful pearl of poetry. I would like
to share these humble words of wisdom:
<div class="poetry">
I am a teacher by profession, <br>
mostly really by obsession; <br>
But even though I boldly try, <br>
I just cannot teach <a href="flying-pig.jpg" id="fp">pigs</a> to fly.</br>
Instead, I slaughter them and fry.
</div>
<div class="citation">
<div class="quote">
Any sufficiently advanced poetry is indistinguishable from divine wisdom.
</div>
<div id="sign">His holiness Pope Hugo Ⅻ.</div>
</div>
</div>
</div>
</body>
</html>
'''
HTML(data)
import ply.lex as lex
# Token names for the HTML scanner.
# BUG FIX: the original list was missing a comma after 'HEAD_END', so Python's
# implicit string-literal concatenation fused it with 'SCRIPT_START' into the
# single bogus name 'HEAD_ENDSCRIPT_START'.  That left the tokens HEAD_END and
# SCRIPT_START undeclared, which makes PLY's lex.lex() reject the corresponding
# rule functions.  The comma is restored here.
tokens = [ 'HEAD_START',
           'HEAD_END',
           'SCRIPT_START',
           'SCRIPT_END',
           'TAG',
           'LINEBREAK',
           'NAMED_ENTITY',
           'UNICODE',
           'ANY'
         ]
# Exclusive lexer states: while in 'header' or 'script', only rules whose
# names carry that state prefix (e.g. t_header_*) are active.
states = [ ('header', 'exclusive'),
           ('script', 'exclusive')
         ]
# Entering <head>: switch into the exclusive 'header' state so the header
# contents are discarded instead of echoed.  (In PLY the docstring below IS
# the token regex and must not be edited.)
def t_HEAD_START(t):
    r'<head>'
    t.lexer.begin('header')
# Entering <script>: switch into the exclusive 'script' state so the script
# body is discarded instead of echoed.
def t_SCRIPT_START(t):
    r'<script>'
    t.lexer.begin('script')
# Collapse any whitespace run containing at least one newline into a single
# printed newline.
def t_LINEBREAK(t):
    r'(\s*\n\s*)+'
    print()
# Swallow every other HTML tag without producing any output.
def t_TAG(t):
    r'<[^>]+>'
    pass
from html.entities import html5
html5['auml']
# Translate a named HTML entity (e.g. '&auml;') into its Unicode character
# via the html.entities.html5 table.
def t_NAMED_ENTITY(t):
    r'&[a-zA-Z]+;?'
    if t.value[-1] == ';': # ';' is not part of the entity name
        entity_name = t.value[1:-1] # so chop it off
    else:
        entity_name = t.value[1:]
    # NOTE(review): html5[...] raises KeyError for unknown entity names —
    # assumes the input uses only valid HTML5 entities; confirm.
    unicode_char = html5[entity_name]
    print(unicode_char, end='') # don't print a line break
# Translate a numeric character reference (e.g. '&#228;') into the character
# with that code point.
def t_UNICODE(t):
    r'&\#[0-9]+;?'
    if t.value[-1] == ';':
        number = t.value[2:-1]  # strip leading '&#' and trailing ';'
    else:
        number = t.value[2:]    # strip leading '&#'
    print(chr(int(number)), end='')
chr(8555)
chr(128034)
# Fallback rule: echo any character no other rule matched.
def t_ANY(t):
    r'.'
    print(t.value, end='')
# Leaving </head>: return the lexer to the default 'INITIAL' state.
def t_header_HEAD_END(t):
    r'</head>'
    t.lexer.begin('INITIAL')
# Leaving </script>: return the lexer to the default 'INITIAL' state.
def t_script_SCRIPT_END(t):
    r'</script>'
    t.lexer.begin('INITIAL')
# While in the 'header' or 'script' state, silently discard every character
# (including newlines).
def t_header_script_ANY(t):
    r'.|\n'
    pass
# Error handler for the default state: report the offending character and
# skip past it so lexing can continue.
def t_error(t):
    print(f"Illegal character: '{t.value[0]}'")
    t.lexer.skip(1)
# Error handler for the 'header' state.  t_header_script_ANY matches every
# character, so in practice this should be unreachable.
def t_header_error(t):
    print(f"Illegal character in state 'header': '{t.value[0]}'")
    t.lexer.skip(1)
# Error handler for the 'script' state; likewise unreachable in practice.
def t_script_error(t):
    print(f"Illegal character in state 'script': '{t.value[0]}'")
    t.lexer.skip(1)
__file__ = 'main'
lexer = lex.lex(debug=True)
lexer.input(data)
def scan(lexer):
    # Drive the lexer over its entire input.  All visible output is produced
    # as a side effect of the rule actions (print calls); the token objects
    # themselves are discarded.
    for t in lexer:
        pass
scan(lexer)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imports
Step2: Token Declarations
Step3: Definition of the States
Step4: Token Definitions
Step5: The Definition of the Token SCRIPT_START
Step6: The Definition of the Token `LINEBREAK``
Step7: The Definition of the Token TAG
Step8: The Definition of the Token NAMED_ENTITY
Step9: The regular expression &[a-zA-Z]+;? searches for <span style="font-variant
Step10: The Definition of the Token UNICODE
Step11: The Definition of the Token ANY
Step12: The Definition of the Token HEAD_END
Step13: The Definition of the Token SCRIPT_END
Step14: The Definition of the Token ANY
Step15: Error Handling
Step16: The function t_header_error is called when a substring at the beginning of the input can not be matched by any of the regular expressions defined in the various tokens and the scanner is in state header. Actually, this function can never be called.
Step17: The function t_script_error is called when a substring at the beginning of the input can not be matched by any of the regular expressions defined in the various tokens and the scanner is in state script. Actually, this function can never be called.
Step18: Running the Scanner
Step19: The line below generates the scanner. Because the option debug=True is set, we can see the regular expression that is generated for scanning.
Step20: Next, we feed our input string into the generated scanner.
Step21: In order to scan the data that we provided in the last line, we iterate over all tokens generated by our scanner.
|
10,236
|
<ASSISTANT_TASK:>
Python Code:
# standard imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xarray as xr
import warnings
%matplotlib inline
np.set_printoptions(precision=3, linewidth=80, edgeitems=1) # make numpy less verbose
xr.set_options(display_width=70)
warnings.simplefilter('ignore') # filter some warning messages
import numpy as np
a = np.array([[1, 3, 9], [2, 8, 4]])
a
a[1, 2]
a.mean(axis=0)
ds = xr.tutorial.load_dataset('air_temperature')
ds
ds.air
ds.dims
ds.attrs
ds.air.values
type(ds.air.values)
ds.air.dims
ds.air.attrs
ds.air.attrs['tutorial-date'] = 27042017
ds.air.attrs
kelvin = ds.air.mean(dim='time')
kelvin.plot();
centigrade = kelvin - 273.16
centigrade.plot();
# ufuncs work too
np.sin(centigrade).plot();
ds
ds['centigrade'] = centigrade
ds['kelvin'] = kelvin
ds
ds.kelvin.attrs # attrs are empty! Let's add some
ds.kelvin.attrs['Description'] = 'Mean air tempterature (through time) in kelvin.'
ds.kelvin
ds.to_netcdf('new file.nc')
ds.air[:, 1, 2] # note that the attributes, coordinates are preserved
ds.air[:, 1, 2].plot();
ds.air.isel(time=0).plot(); # like above, but with a dimension name this time
ds.air.sel(lat=72.5, lon=205).plot();
ds.air.sel(time='2013-01-02').plot(); # Note that we will extract 4 time steps! 3d data is plotted as histogram
ds.air.sel(time='2013-01-02T06:00').plot(); # or look at a single timestep
ds.air.sel(lat=slice(60, 50), lon=slice(200, 270), time='2013-01-02T06:00:00').plot();
ds.air.sel(lat=41.8781, lon=360-87.6298, method='nearest', tolerance=5).plot();
a = xr.DataArray(np.arange(3), dims='time',
coords={'time':np.arange(3)})
b = xr.DataArray(np.arange(4), dims='space',
coords={'space':np.arange(4)})
a + b
atime = np.arange(3)
btime = np.arange(5) + 1
atime, btime
a = xr.DataArray(np.arange(3), dims='time',
coords={'time':atime})
b = xr.DataArray(np.arange(5), dims='time',
coords={'time':btime})
a + b
ds.max()
ds.air.median(dim=['lat', 'lon']).plot();
means = ds.air.mean(dim=['time'])
means.where(means > 273.15).plot();
ds.air.groupby('time.season').mean()
ds.air.groupby('time.month').mean('time')
clim = ds.air.groupby('time.month').mean('time')
anomalies = ds.air.groupby('time.month') - clim
anomalies
anomalies.plot();
anomalies.sel(time= '2013-02').plot(); # Find all the anomolous values for February
tmin = ds.air.resample('1D', dim='time', how='min') # Resample to one day '1D
tmax = ds.air.resample('1D', dim='time', how='max')
(tmin.sel(time='2013-02-15') - 273.15).plot();
ds_extremes = xr.Dataset({'tmin': tmin, 'tmax': tmax})
ds_extremes
zonal_t_average = ds.air.mean(dim=['lon', 'time']) - 273.15
zonal_t_average.plot(); # 1D arrays are plotted as line plots
t_average = ds.air.mean(dim='time') - 273.15
t_average.plot(); # 2D arrays are plotted with pcolormesh
t_average.plot.contourf(); # but you can use contour(), contourf() or imshow() if you wish
t_average.plot.contourf(cmap='BrBG_r', vmin=-15, vmax=15);
t_average.plot.contourf(cmap='BrBG_r', levels=22, center=False);
air_outliers = ds.air.isel(time=0).copy()
air_outliers[0, 0] = 100
air_outliers[-1, -1] = 400
air_outliers.plot(); # outliers mess with the datarange and colorscale!
# Using `robust=True` uses the 2nd and 98th percentiles of the data to compute the color limits.
air_outliers.plot(robust=True);
t_season = ds.air.groupby('time.season').mean(dim='time') - 273.15
# facet plot allows to do multiplot with the same color mappings
t_season.plot.contourf(x='lon', y='lat', col='season', col_wrap=2, levels=22);
import cartopy.crs as ccrs
f = plt.figure(figsize=(8, 4))
# Define the map projection *on which* you want to plot
ax = plt.axes(projection=ccrs.Orthographic(-80, 35))
# ax is an empty plot. We now plot the variable t_average onto ax
# the keyword "transform" tells the function in which projection the air temp data is stored
t_average.plot(ax=ax, transform=ccrs.PlateCarree())
# Add gridlines and coastlines to the plot
ax.coastlines(); ax.gridlines();
# this time we need to retrieve the plots to do things with the axes later on
p = t_season.plot(x='lon', y='lat', col='season', transform=ccrs.PlateCarree(),
subplot_kws={'projection': ccrs.Orthographic(-80, 35)})
for ax in p.axes.flat:
ax.coastlines()
import seaborn as sns
data = (ds_extremes
.sel_points(lat=[41.8781, 37.7749], lon=[360-87.6298, 360-122.4194],
method='nearest', tolerance=3,
dim=xr.DataArray(['Chicago', 'San Francisco'],
name='location', dims='location'))
.to_dataframe()
.reset_index()
.assign(month=lambda x: x.time.dt.month))
plt.figure(figsize=(10, 5))
sns.violinplot('month', 'tmax', 'location', data=data, split=True, inner=None);
from glob import glob
files = glob('data/*dis*.nc')
runoff = xr.open_mfdataset(files)
runoff
runoff.time
runoff.nbytes / 1e9 # Convert to gigiabytes
runoff = runoff.chunk({'lat': 60})
runoff.chunks
%time ro_seasonal = runoff.groupby('time.season').mean('time')
import dask
from multiprocessing.pool import ThreadPool
dask.set_options(pool=ThreadPool(1))
%time ro_seasonal.compute()
dask.set_options(pool=ThreadPool(4))
%time ro_seasonal = runoff.groupby('time.season').mean('time')
%time result = ro_seasonal.compute()
brazil = dict(lat=slice(10.75, -40.75), lon=slice(-100.25, -25.25))
result.dis.sel(**brazil).plot(col='season', size=4, cmap='Spectral_r')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic data arrays in numpy
Step2: numpy is a powerful but "low-level" array manipulation tool. Axis only have numbers and no names (it is easy to forget which axis is what, a common source of trivial bugs), arrays can't carry metadata (e.g. units), and the data is unstructured (i.e. the coordinates and/or other related arrays have to be handled separately
Step3: Let's Do Some Math
Step4: Notice xarray has changed the colormap according to the dataset (borrowing logic from Seaborn).
Step5: Adding Data to DataSets
Step6: Let's add those kelvin and centigrade dataArrays to the dataset.
Step7: 3. Selecting data with named dimensions
Step8: This selection implies prior knowledge about the structure of the data, and is therefore much less readable than the "xarray methods" presented below.
Step9: Selection by value
Step10: Selection by value works well for time, too
Step11: Selecting a range of values
Step12: Nearest neighbor lookup
Step13: 4. Operations and computation
Step14: Alignment
Step15: Aggregation
Step16: Masking with .where()
Step17: 5. Groupby and "split-apply-combine"
Step18: <img src="./figures/split_apply-combine.png" alt="split" style="width
Step19: You can also do arithmetic with groupby objects, which repeats the arithmetic over each group
Step20: Resample adjusts a time series to a new resolution
Step21: 6. Graphics
Step22: 2D plots
Step23: Customizing 2d plots
Step24: Dealing with Outliers
Step25: Facet plots
Step26: Plotting on maps
Step27: Facet plots on maps
Step28: Seaborn is Cool
Step29: 7. Out-of-core computation
Step30: xarray even puts them in the right order for you.
Step31: How big is all this data uncompressed? Will it fit into memory?
Step32: Working with Big Data
|
10,237
|
<ASSISTANT_TASK:>
Python Code:
#This line is very important: (It turns on the inline visuals!)
%pylab inline
a = [2,9,32,12,14,6,9,23,4,5,13,6,7,92,21,45];
b = [7,21,4,2,92,9,9,6,13,12,45,5,6,23,14,32];
#Please calculate the dot product of the vectors 'a' and 'b'.
#You may use any method you like. If get stuck. Check:
#http://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html
#If you rearrange the numbers in 'b', what sequence will give
#the highest dot-product magnitude?
#The cross-correlation algorithm is another name for the Pearson's test.
#Here it is written in code form and utilising the builtin functions:
c = [0,1,2]
d = [3,4,5]
rho = np.average((c-np.average(c))*(d-np.average(d)))/(np.std(c)*np.std(d))
print('rho',round(rho,3))
#equally you can write
rho = np.dot(c-np.average(c),d-np.average(d))/sqrt(((np.dot(c-np.average(c),c-np.average(c)))*np.dot(d-np.average(d),d-np.average(d))))
print('rho',round(rho,3))
#Why is the rho for c and d, 1.0?
#Edit the variables c and d and find the pearson's value for 'a' and 'b'.
#What happens when you correlate 'a' with 'a'?
#Here is an image from the Fiji practical
from tifffile import imread as imreadtiff
im = imreadtiff('neuron.tif')
print('image dimensions',im.shape, ' im dtype:',im.dtype)
subplot(2,2,1)
imshow(im[0,:,:],cmap='Blues_r')
subplot(2,2,2)
imshow(im[1,:,:],cmap='Greens_r')
subplot(2,2,3)
imshow(im[2,:,:],cmap='Greys_r')
subplot(2,2,4)
imshow(im[3,:,:],cmap='Reds_r')
a = im[0,:,:].reshape(-1)
b = im[3,:,:].reshape(-1)
#Calculate the pearson's coefficent (rho) for the image channel 0, 3.
#You should hopefully obtain a value 0.829
#from tifffile import imread as imreadtiff
im = imreadtiff('composite.tif')
#The organisation of this file is not simple. It is also a 16-bit image.
print("shape of im: ",im.shape,"bit-depth: ",im.dtype)
#We can assess the image data like so.
CH0 = im[0,0,:,:]
CH1 = im[1,0,:,:]
#Single channels visualisation can handle 16-bit
subplot(2,2,1)
imshow(CH0,cmap='Reds_r')
subplot(2,2,2)
imshow(CH1,cmap='Greens_r')
subplot(2,2,3)
#RGB data have to range between 0 and 255 in each channel and be int (8-bit).
imRGB = np.zeros((CH0.shape[0],CH0.shape[1],3))
imRGB[:,:,0] = CH0/255.0
imRGB[:,:,1] = CH1/255.0
imshow(255-imRGB.astype(int8))
#What is the current Pearson's value for this image?
rho_max = 0
#This moves one of your images with respect to the other.
for c in range(1,40):
for r in range(1,40):
#We need to dynamically sample our image.
temp = CH0[c:-40+c,r:-40+r].reshape(-1);
#The -40 makes sure they are the same size.
ref = CH1[:-40,:-40].reshape(-1);
rho = np.dot(temp-np.average(temp),ref-np.average(ref))/sqrt(((np.dot(temp-np.average(temp),temp-np.average(temp)))*np.dot(ref-np.average(ref),ref-np.average(ref))))
#You will need to work out the highest rho value is recorded.
#You will then need to find the coordinates of this high rho.
#You will then need to provide a visualisation with the image translated.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Pearson's test
Step2: Pearson's comparison of microscopy derived images
Step3: Maybe remove so not to clash with Mark's.
|
10,238
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from pwoogs import moog,estimate,utils
import matplotlib.pyplot as plt
import q2
import shutil as sh
%matplotlib inline
# Getting star names
star_names = np.loadtxt('s_twins.csv',
skiprows=1,
usecols=(0,),
dtype=str,
delimiter=',')
# This is used to manage data arrays
u = utils.arr_manage()
def set_star(choice, **kwargs):
if ('inverted_filelist' in kwargs):
invert = kwargs['inverted_filelist']
else:
invert = False
choice = int(choice)
print 'Creating the stellar atmosphere file.'
data = np.loadtxt('s_twins.csv',
usecols=(1,2,3,4),
skiprows=1,
delimiter=',')
model = q2.modatm.interpolate(data[choice,0],
data[choice,1],
data[choice,2],
grid='odfnew')
N = len(model['RHOX'])
with open('star.mod','w') as f:
f.truncate()
f.write(
KURTYPE
%.1f/ %.2f/ %.2f mic = %.2f
%i
5000.0\n % (data[choice,0],
data[choice,1],
data[choice,2],
data[choice,3],
N)
)
for i in range(N):
f.write(" %.8E %.1f %.3E %.3E\n" % (
model['RHOX'][i],
model['T'][i],
model['P'][i],
model['XNE'][i])
)
f.write(' %.2f\n' % data[choice,3])
f.write(
NATOMS 0 %.2f
NMOL 28
101.0 106.0 107.0 108.0 112.0 126.0
606.0 607.0 608.0
707.0 708.0
808.0 812.0 822.0 823.0 840.0
10108.0 10820.0 60808.0
6.1 7.1 8.1 12.1 20.1 22.1 23.1 26.1 40.1
% data[choice,2]
)
# filenames.lis contains the names of all the fits files of the spectra
# In my case, the list is in an opposite order as the list of star names,
# so that's choice is re-set if invert == True
filename = np.loadtxt('filenames.lis',str)
if invert == True:
choice = len(star_names)-1-choice
print "Creating the spectrum_full.dat file for %s" % filename[choice]
sh.copyfile(filename[choice],'spectrum_full.dat')
def v_m(T):
    """Linear relation in effective temperature T (K), anchored at 3.6 for
    the solar value T = 5777 K with slope 1/486 per kelvin."""
    offset = (T - 5777.) / 486
    return 3.6 + offset
def manage(choice, interval, lines, chunk):
    # Build a wavelength window of width `interval` centred on the target
    # spectral line (lines[choice,0]) and cut that window, padded by `chunk`
    # on each side, out of 'spectrum_full.dat' into 'spectrum.dat'.
    # NOTE(review): relies on the module-level helper `u` (utils.arr_manage)
    # and on 'spectrum_full.dat' having been written beforehand — confirm.
    print 'Managing the data file.'
    spec_window = np.array([lines[choice,0]-interval/2,lines[choice,0]+interval/2])
    u.cut(spec_window[0]-chunk,spec_window[1]+chunk,'spectrum_full.dat','spectrum.dat')
    print 'Done.\n'
    return spec_window
def correct(choice, data, lines, cont_type, r_1, r_2, r_3):
# The following lines are used to find calibration corrections for the spectral line
print 'Finding the shift on the wavelength.'
wl_shift = 10.
ind = u.find_index(lines[choice,0],data[:,0])
while abs(wl_shift) > 2.0:
center = u.find_center(data[ind-r_1+1:ind+r_1+2])
wl_shift = lines[choice,0]-center
print 'Wavelength shift = %.4f\n' % wl_shift
print "Finding the correction factor for the continuum."
ind_min = u.find_index(lines[choice,0]-r_2,data[:,0])
ind_max = u.find_index(lines[choice,0]+r_2,data[:,0])
if cont_type == 'single':
corr = 1.0/np.mean(u.find_corr(
data[ind_min:ind_max,:],
r_3
))
elif cont_type == 'multi':
target_wls = np.loadtxt('continuum.dat')
corr = 1.0/np.mean(u.find_corr_from_ensemble(
data[ind_min:ind_max,:],
target_wls[choice,:],
r_3
))
print "Correction factor = %.4f" % corr
return wl_shift, corr
def full_auto(choice, interval, res_power, SN, **kwargs):
# Spectrum chunk size. Default = 10. angstroms
if ('chunk' in kwargs):
chunk = kwargs['chunk']
assert chunk > interval, 'Invalid chunk size'
else:
chunk = 10.0
# Continuum correction: choose between 'single' or 'multi' wavelengths
if ('continuum_correction' in kwargs):
cont_type = kwargs['continuum_correction']
assert cont_type == 'multi', 'Continuum correction type invalid'
else:
cont_type = 'single'
# Radius of points to be used in finding the correction for the line center
# Default = 3
if ('r_1' in kwargs):
radius_1 = kwargs['r_1']
assert radius_1 > 0, 'Invalid radius for line center correction'
else:
radius_1 = 3
# Radius in angstroms for the region around the target wavelength to be
# analyzed for the continuum . Default = 3.0
if ('r_2' in kwargs):
radius_2 = kwargs['r_2']
assert radius_2 > 0, 'Invalid radius of wavelength region'
else:
radius_2 = 3.0
# Radius in points to be used in finding the correction for the continuum.
# Default = 2
if ('r_3' in kwargs):
radius_3 = kwargs['r_3']
assert radius_3 > 0, 'Invalid radius for continuum correction'
else:
radius_3 = 2
# Radius in points to be used in evaluating the performance function
# Default = 7
if ('r_4' in kwargs):
radius_4 = kwargs['r_4']
assert radius_4 > 0, 'Invalid radius for performance evaluation'
else:
radius_4 = 7
# Blue wing weight to be used on estimation. Default = 10.0
if ('bw' in kwargs):
bw = kwargs['bw']
assert bw >= 0.0, 'Invalid weight for blue wing'
else:
bw = 10.0
# Red wing weight to be used on estimation. Default = 5.0
if ('rw' in kwargs):
rw = kwargs['rw']
assert rw >= 0.0, 'Invalid weight for red wing'
else:
rw = 5.0
# Line center weight to be used on estimation. Default = 25.0
if ('cw' in kwargs):
cw = kwargs['cw']
assert cw >= 0.0, 'Invalid weight for line center'
else:
cw = 25.0
# Bad fit tolerance in number of points above the S/N ratio. Default = 2
if ('tol' in kwargs):
tol = kwargs['tol']
assert tol >= 0, 'Invalid tolerance'
else:
tol = 2
# 'plot' on window or 'save' as png? Default = plot on window
if ('output' in kwargs):
output = kwargs['output']
assert output == 'save', 'Invalid radius for continuum correction'
else:
output = 'plot'
choice = int(choice)
# Synthesis parameters
line_file = 'lines.dat'
lines = np.loadtxt(line_file,skiprows=1,usecols=(0,1))
# Star parameters
star_info = np.genfromtxt('star.mod',skip_header=1,skip_footer=83,
usecols=(0,1),delimiter='/ ')
T_star = star_info[0]
v_macro = v_m(T_star)
data = np.loadtxt('spectrum.dat')
# Managing the data file
spec_window = manage(choice, interval, lines, chunk)
# The instrumental broadening
gauss = np.mean(spec_window)/res_power
# Finding the correction factors
wl_shift, corr = correct(choice, data, lines, cont_type, radius_1, radius_2,
radius_3)
print "Now starting estimation of vsini..."
# Instatiating the function to write parameters for MOOG
r = estimate.vsini(
spec_window,
gauss,
v_macro,
line_file,
choice,
x_wl=wl_shift,
y_mult=corr,
bwing_w = bw,
rwing_w = rw,
center_w = cw,
perf_radius=radius_4,
SN=SN,
badfit_tol = tol,
star_name=star_names[m]
)
if output == 'plot':
save = 'window'
else:
save = '%s_line%i.png'%(star_names[m],choice)
# Finding vsini and abundance
vsini,abund,bfs = r.find(N=15,
max_i=20,
min_i=10,
limits=[0.05,0.001],
save=save)
return vsini,abund,bfs
m = 88
set_star(m, inverted_filelist=True)
v, a, b = full_auto(choice=5, interval=1.0, r_1=3, r_2=3.0, r_4=7, res_power=65000., SN=400)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: The following function is used to set the input files to be used by MOOG for a specific star from the list star_names.
Step4: v_m returns the macroturbulent velocity for a solar twin with a temperature T.
Step5: Managing data file because it is possibly huge
Step6: Function that returns the corrections factors for line center and continuum
Step7: Next, there is the code that finds the v sin(i) in fully automatic mode, for a specific star.
|
10,239
|
<ASSISTANT_TASK:>
Python Code:
4
2 + 2
50 - 5*6
(50-5)*6
8/5
8//5 # Floor division discards the fractional part
8%5 # The % operator return the remainder of the division
5 ** 3 # 5 squared
type(4)
type(1.3)
width = 30
width
width = 30
height = 2
width * height
s = 3 * 4
s # Try to access an undefined variable
print(5)
print(width)
empty_list = []
empty_list
type(empty_list)
squares = [1, 4, 9, 16, 25]
squares
squares[0]
squares[1]
len(squares)
squares[len(squares) - 1]
squares[-1] # Last item
squares[-2] # Second-last item
squares = [1, 2, 3, 4, 5]
squares[1:4] # Get sub-list from the 1 to 4 with step size 1
squares[1:4:2] # Get sub-list from 1 to 4 with step size 2
squares[:4] # from begining of the list to 4 with step size 1
squares[1:] # from 1 to the end of the list with step size 1
squares[::2] # from begining to the end with step size 2
names = []
names.append('John')
names
names.append('Paul')
names
names[1] = 'Alice'
names
names2 = ['Ryan', 'Tang']
names2
names = names + names2
names
names.append(['Fang', 'HE'])
names
squares.append(11)
squares
squares[1:3] = [12, 13, 14]
squares
len(squares) # calculate the number of item in the list
list1 = [1, 2]
list2 = [3, 4]
list3 = list1 + list2
list3
list1 = [1, 2, 3]
list2 = [x**2 for x in list1]
list2
'Hello World!'
type('hello')
'doesn\'t'
s = 'Hello World'
s
s[1:]
s[-1]
s1 = 'Hello '
s2 = 'World!'
s3 = s1 + s2
s3
s3.isdigit()
s3.isupper()
s3.lower()
squares = [1, 4, 9, 16, 25]
for s in squares:
print(s)
for i in range(10):
print(i)
for i in range(1, 10):
print(i)
for i in range(1, 10, 2):
print(i)
N = 8
fact = 1
for i in range(1, N+1):
fact = fact * i
fact
# For any i, we list the integer less than i
for i in range(5):
print("i=", i)
l = []
for j in range(0,i):
l.append(j)
print(l)
True
False
type(True)
3 > 1
3 < 1
3 == 1
3 == 3
3 != 2
b = 3 > 4
b
even_number = [2, 4, 6, 8, 10, 12]
even_number
6 in even_number
3 in even_number
empty = {}
empty
type(empty)
food = {'ham': 'yes',
'egg': 'yes',
'spam': 'no'}
food
food['ham']
food['spam'] = 'yes'
food
len(food)
# Accessing non Existing keys
food['apple']
# Merge dictionary
food2 = {'beef': 'yes',
'shoe': 'no'}
food.update(food2)
food
# Iterating over Dictionary
for key in food:
print(key)
print(food[key])
print('\n')
N = 10
if N%2 == 0:
print('Even')
else:
print('Odd')
N = 9
if N%2 == 0:
print('Even')
else:
print('Odd')
N = 9
if N%2 == 0 and N > 10:
print('A Even number greater than 10')
else:
print('smaller than 10 or odd number')
N = 0
while N <= 10:
if N%2 == 0:
print(N, 'is Even.')
else:
print(N, ' is Odd.')
N = N+1
# basic function
def add(x, y):
return x+y
add(2, 3)
# function with optional parameters
def add(x, y=5):
return x + y
add(3)
add(3, 9)
# Keyword Paramters
def add(x, y=5):
return x + y
add(3, y=10)
def isEven(N):
if N%2 == 0:
return True
else:
return False
isEven(10)
isEven(9)
def productEven(l):
ret = 1
for item in l:
if item % 2 == 0:
ret = ret * item
return ret
productEven([2, 3, 8, 7])
result_l = []
for i in range(2000, 3201):
if i%7 == 0 and i%5 != 0:
result_l.append(i)
result_l
l = [4, 2, 5, 6, 4]
count = 0
for item in l:
if item == 4:
count = count + 1
count
def gcd(a, b):
for i in range(max(a, b)+1, 0, -1):
if a%i == 0 and b%i == 0:
return i
gcd(4, 6)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: With Python, use ** operator to calculate powers.
Step2: Use equal sign(=) to assign a value to variable like math variable.
Step3: If a variable is not defined (assigned a value), trying to use it will give you an error
Step4: List
Step5: List can be indexed, with the first item having index 0.
Step6: In addition to indexing, slicing is also supported. While indexing is used to obtain individual item, slicing allows you to obtain sub-list
Step7: You can replace the item in the list and add new items at the end of the list.
Step8: List support the + operation.
Step9: Strings
Step10: String also support indexing and slicing like list. You can see the string as a list of charaters.
Step11: Python Iteration
Step12: Bool Type
Step13: Use bool operators to create bool type
Step14: Other functions may also create bool type.
Step15: Dict
Step16: Conditional Statement
Step17: Function
Step18: Exercises 1
Step19: Exercises 2
Step20: Exercises 3
Step21: Exercises 4
|
10,240
|
<ASSISTANT_TASK:>
Python Code:
import time
import numpy as np
import tensorflow as tf
import utils
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(dataset_filename):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
urlretrieve(
'http://mattmahoney.net/dc/text8.zip',
dataset_filename,
pbar.hook)
if not isdir(dataset_folder_path):
with zipfile.ZipFile(dataset_filename) as zip_ref:
zip_ref.extractall(dataset_folder_path)
with open('data/text8') as f:
text = f.read()
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
## Your code here
from collections import Counter
import random
word_counts = Counter(int_words)
t = 1e-5
total_words = len(int_words)
frequency = { word : float(count) / total_words for word, count in word_counts.items() }
p_drop = {word : 1 - np.sqrt(float(t)/frequency[word]) for word in word_counts }
train_words = [w for w in int_words if p_drop[w] < random.random()] # The final subsampled word list
#print (len(train_words))
#print(train_words[:30])
def get_target(words, idx, window_size=5):
''' Get a list of words in a window around an index. '''
# Your code here
R = random.randint (1, window_size) # or window_size + 1?
start = idx - R if idx >= R else 0
end = idx + R + 1
return list (set(words[start:idx] + words[idx+1:end]))
def get_batches(words, batch_size, window_size=5):
''' Create a generator of word batches as a tuple (inputs, targets) '''
n_batches = len(words)//batch_size
# only full batches
words = words[:n_batches*batch_size]
for idx in range(0, len(words), batch_size):
x, y = [], []
batch = words[idx:idx+batch_size]
for ii in range(len(batch)):
batch_x = batch[ii]
batch_y = get_target(batch, ii, window_size)
y.extend(batch_y)
x.extend([batch_x]*len(batch_y))
yield x, y
train_graph = tf.Graph()
with train_graph.as_default():
# with tf.name_scope('input'):
inputs = tf.placeholder (tf.int32, shape=[None], name='inputs')
# with tf.name_scope('targets'):
labels = tf.placeholder (tf.int32, shape=[None,None], name='labels')
n_vocab = len(int_to_vocab)
n_embedding = 200 # Number of embedding features
with train_graph.as_default():
# with tf.name_scope('embeddings'):
embedding = tf.Variable (tf.random_uniform ([n_vocab, n_embedding], -1.0, 1.0, dtype=tf.float32), name='embedding') # create embedding weight matrix here
embed = tf.nn.embedding_lookup (embedding, inputs) # use tf.nn.embedding_lookup to get the hidden layer output
tf.summary.histogram ('embedding', embedding)
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
softmax_w = tf.Variable (tf.truncated_normal ([n_vocab, n_embedding], stddev=0.1, dtype=tf.float32), name='softmax_w') # create softmax weight matrix here
softmax_b = tf.Variable (tf.zeros (n_vocab, dtype=tf.float32), name='softmax_b') # create softmax biases here
tf.summary.histogram ('softmax_w', softmax_w)
tf.summary.histogram ('softmax_b', softmax_b)
# Calculate the loss using negative sampling
loss = tf.nn.sampled_softmax_loss (softmax_w, softmax_b, labels, embed, n_sampled, n_vocab, name='loss')
cost = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer().minimize(cost)
tf.summary.scalar ('cost', cost)
with train_graph.as_default():
## From Thushan Ganegedara's implementation
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100
# pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,
random.sample(range(1000,1000+valid_window), valid_size//2))
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist:
!mkdir checkpoints
epochs = 1
batch_size = 1000
window_size = 10
with train_graph.as_default():
saver = tf.train.Saver()
merged = tf.summary.merge_all()
with tf.Session(graph=train_graph) as sess:
iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
train_writer = tf.summary.FileWriter ("./logs/2/train", sess.graph)
test_writer = tf.summary.FileWriter ("./logs/2/test")
for e in range(1, epochs+1):
batches = get_batches(train_words, batch_size, window_size)
start = time.time()
for x, y in batches:
feed = {inputs: x,
labels: np.array(y)[:, None]}
summary, train_loss, _ = sess.run([merged, cost, optimizer], feed_dict=feed)
loss += train_loss
if iteration % 100 == 0:
end = time.time()
train_writer.add_summary (summary, iteration)
print("Epoch {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Avg. Training loss: {:.4f}".format(loss/100),
"{:.4f} sec/batch".format((end-start)/100))
loss = 0
start = time.time()
if iteration % 1000 == 0:
## From Thushan Ganegedara's implementation
# note that this is expensive (~20% slowdown if computed every 500 steps)
sim = similarity.eval()
for i in range(valid_size):
valid_word = int_to_vocab[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = int_to_vocab[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
iteration += 1
save_path = saver.save(sess, "checkpoints/text8.ckpt")
embed_mat = sess.run(normalized_embedding)
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
embed_mat = sess.run(embedding)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
Step2: Preprocessing
Step3: And here I'm creating dictionaries to covert words to integers and backwards, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.
Step4: Subsampling
Step5: Making batches
Step6: Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function by the way, helps save memory.
Step7: Building the graph
Step8: Embedding
Step9: Negative sampling
Step10: Validation
Step11: Training
Step12: Restore the trained network if you need to
Step13: Visualizing the word vectors
|
10,241
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import (
display, display_html, display_png, display_svg
)
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from IPython.core.pylabtools import print_figure
from IPython.display import Image, SVG, Math
class Gaussian(object):
A simple object holding data sampled from a Gaussian distribution.
def __init__(self, mean=0.0, std=1, size=1000):
self.data = np.random.normal(mean, std, size)
self.mean = mean
self.std = std
self.size = size
# For caching plots that may be expensive to compute
self._png_data = None
def _figure_data(self, format):
fig, ax = plt.subplots()
ax.hist(self.data, bins=50)
ax.set_title(self._repr_latex_())
ax.set_xlim(-10.0,10.0)
data = print_figure(fig, format)
# We MUST close the figure, otherwise IPython's display machinery
# will pick it up and send it as output, resulting in a double display
plt.close(fig)
return data
def _repr_png_(self):
if self._png_data is None:
self._png_data = self._figure_data('png')
return self._png_data
def _repr_latex_(self):
return r'$\mathcal{N}(\mu=%.2g, \sigma=%.2g),\ N=%d$' % (self.mean,
self.std, self.size)
x = Gaussian(2.0, 1.0)
x
display(x)
display_png(x)
x2 = Gaussian(0, 2, 2000)
x2
display_png(x)
display_png(x2)
p = np.polynomial.Polynomial([1,2,3], [-10, 10])
p
def poly_to_latex(p):
terms = ['%.2g' % p.coef[0]]
if len(p) > 1:
term = 'x'
c = p.coef[1]
if c!=1:
term = ('%.2g ' % c) + term
terms.append(term)
if len(p) > 2:
for i in range(2, len(p)):
term = 'x^%d' % i
c = p.coef[i]
if c!=1:
term = ('%.2g ' % c) + term
terms.append(term)
px = '$P(x)=%s$' % '+'.join(terms)
dom = r', $x \in [%.2g,\ %.2g]$' % tuple(p.domain)
return px+dom
poly_to_latex(p)
from IPython.display import Latex
Latex(poly_to_latex(p))
ip = get_ipython()
for mime, formatter in ip.display_formatter.formatters.items():
print('%24s : %s' % (mime, formatter.__class__.__name__))
ip = get_ipython()
latex_f = ip.display_formatter.formatters['text/latex']
help(latex_f.for_type)
help(latex_f.for_type_by_name)
latex_f.for_type_by_name('numpy.polynomial.polynomial',
'Polynomial', poly_to_latex)
p
p2 = np.polynomial.Polynomial([-20, 71, -15, 1])
p2
import json
import uuid
from IPython.display import display_javascript, display_html, display
class FlotPlot(object):
def __init__(self, x, y):
self.x = x
self.y = y
self.uuid = str(uuid.uuid4())
def _ipython_display_(self):
json_data = json.dumps(list(zip(self.x, self.y)))
display_html('<div id="{}" style="height: 300px; width:80%;"></div>'.format(self.uuid),
raw=True
)
display_javascript(
require(["//cdnjs.cloudflare.com/ajax/libs/flot/0.8.2/jquery.flot.min.js"], function() {
var line = JSON.parse("%s");
console.log(line);
$.plot("#%s", [line]);
});
% (json_data, self.uuid), raw=True)
import numpy as np
x = np.linspace(0,10)
y = np.sin(x)
FlotPlot(x, np.sin(x))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parts of this notebook need the matplotlib inline backend
Step3: Special display methods
Step4: Create an instance of the Gaussian distribution and return it to display the default representation
Step5: You can also pass the object to the display function to display the default representation
Step6: Use display_png to view the PNG representation
Step7: <div class="alert alert-success">
Step8: You can then compare the two Gaussians by displaying their histograms
Step9: Note that like print, you can call any of the display functions multiple times in a cell.
Step10: Next, define a function that pretty-prints a polynomial as a LaTeX string
Step11: This produces, on our polynomial p, the following
Step12: You can render this string using the Latex class
Step13: However, you can configure IPython to do this automatically by registering the Polynomial class and the plot_to_latex function with an IPython display formatter. Let's look at the default formatters provided by IPython
Step14: The formatters attribute is a dictionary keyed by MIME types. To define a custom LaTeX display function, you want a handle on the text/latex formatter
Step15: The formatter object has a couple of methods for registering custom display functions for existing types.
Step16: In this case, we will use for_type_by_name to register poly_to_latex as the display function for the Polynomial type
Step17: Once the custom display function has been registered, all NumPy Polynomial instances will be represented by their LaTeX form instead
Step19: More complex display with _ipython_display_
|
10,242
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
from shogun import *
import shogun as sg
#Needed lists for the final plot
classifiers_linear = []*10
classifiers_non_linear = []*10
classifiers_names = []*10
fadings = []*10
shogun_feats_linear = features(CSVFile(os.path.join(SHOGUN_DATA_DIR, 'toy/classifier_binary_2d_linear_features_train.dat')))
shogun_labels_linear = BinaryLabels(CSVFile(os.path.join(SHOGUN_DATA_DIR, 'toy/classifier_binary_2d_linear_labels_train.dat')))
shogun_feats_non_linear = features(CSVFile(os.path.join(SHOGUN_DATA_DIR, 'toy/classifier_binary_2d_nonlinear_features_train.dat')))
shogun_labels_non_linear = BinaryLabels(CSVFile(os.path.join(SHOGUN_DATA_DIR, 'toy/classifier_binary_2d_nonlinear_labels_train.dat')))
feats_linear = shogun_feats_linear.get('feature_matrix')
labels_linear = shogun_labels_linear.get('labels')
feats_non_linear = shogun_feats_non_linear.get('feature_matrix')
labels_non_linear = shogun_labels_non_linear.get('labels')
def plot_binary_data(plot,X_train, y_train):
This function plots 2D binary data with different colors for different labels.
plot.xlabel(r"$x$")
plot.ylabel(r"$y$")
plot.plot(X_train[0, np.argwhere(y_train == 1)], X_train[1, np.argwhere(y_train == 1)], 'ro')
plot.plot(X_train[0, np.argwhere(y_train == -1)], X_train[1, np.argwhere(y_train == -1)], 'bo')
def compute_plot_isolines(classifier,feats,size=200,fading=True):
This function computes the classification of points on the grid
to get the decision boundaries used in plotting
x1 = np.linspace(1.2*min(feats[0]), 1.2*max(feats[0]), size)
x2 = np.linspace(1.2*min(feats[1]), 1.2*max(feats[1]), size)
x, y = np.meshgrid(x1, x2)
plot_features=features(np.array((np.ravel(x), np.ravel(y))))
if fading == True:
plot_labels = classifier.apply(plot_features).get('current_values')
else:
plot_labels = classifier.apply(plot_features).get('labels')
z = plot_labels.reshape((size, size))
return x,y,z
def plot_model(plot,classifier,features,labels,fading=True):
This function plots an input classification model
x,y,z = compute_plot_isolines(classifier,features,fading=fading)
plot.pcolor(x,y,z,cmap='RdBu_r')
plot.contour(x, y, z, linewidths=1, colors='black')
plot_binary_data(plot,features, labels)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("Linear Features")
plot_binary_data(plt,feats_linear, labels_linear)
plt.subplot(122)
plt.title("Non Linear Features")
plot_binary_data(plt,feats_non_linear, labels_non_linear)
plt.figure(figsize=(15,5))
c = 0.5
epsilon =1e-3
svm_linear = LibLinear(c,shogun_feats_linear,shogun_labels_linear)
svm_linear.put('liblinear_solver_type', L2R_L2LOSS_SVC)
svm_linear.put('epsilon', epsilon)
svm_linear.train()
classifiers_linear.append(svm_linear)
classifiers_names.append("SVM Linear")
fadings.append(True)
plt.subplot(121)
plt.title("Linear SVM - Linear Features")
plot_model(plt,svm_linear,feats_linear,labels_linear)
svm_non_linear = LibLinear(c,shogun_feats_non_linear,shogun_labels_non_linear)
svm_non_linear.put('liblinear_solver_type', L2R_L2LOSS_SVC)
svm_non_linear.put('epsilon', epsilon)
svm_non_linear.train()
classifiers_non_linear.append(svm_non_linear)
plt.subplot(122)
plt.title("Linear SVM - Non Linear Features")
plot_model(plt,svm_non_linear,feats_non_linear,labels_non_linear)
gaussian_c=0.7
gaussian_kernel_linear=sg.kernel("GaussianKernel", log_width=np.log(100))
gaussian_svm_linear=sg.machine('LibSVM', C1=gaussian_c, C2=gaussian_c, kernel=gaussian_kernel_linear, labels=shogun_labels_linear)
gaussian_svm_linear.train(shogun_feats_linear)
classifiers_linear.append(gaussian_svm_linear)
fadings.append(True)
gaussian_kernel_non_linear=sg.kernel("GaussianKernel", log_width=np.log(100))
gaussian_svm_non_linear=sg.machine('LibSVM', C1=gaussian_c, C2=gaussian_c, kernel=gaussian_kernel_non_linear, labels=shogun_labels_non_linear)
gaussian_svm_non_linear.train(shogun_feats_non_linear)
classifiers_non_linear.append(gaussian_svm_non_linear)
classifiers_names.append("SVM Gaussian Kernel")
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("SVM Gaussian Kernel - Linear Features")
plot_model(plt,gaussian_svm_linear,feats_linear,labels_linear)
plt.subplot(122)
plt.title("SVM Gaussian Kernel - Non Linear Features")
plot_model(plt,gaussian_svm_non_linear,feats_non_linear,labels_non_linear)
sigmoid_c = 0.9
sigmoid_kernel_linear = SigmoidKernel(shogun_feats_linear,shogun_feats_linear,200,1,0.5)
sigmoid_svm_linear = sg.machine('LibSVM', C1=sigmoid_c, C2=sigmoid_c, kernel=sigmoid_kernel_linear, labels=shogun_labels_linear)
sigmoid_svm_linear.train()
classifiers_linear.append(sigmoid_svm_linear)
classifiers_names.append("SVM Sigmoid Kernel")
fadings.append(True)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("SVM Sigmoid Kernel - Linear Features")
plot_model(plt,sigmoid_svm_linear,feats_linear,labels_linear)
sigmoid_kernel_non_linear = SigmoidKernel(shogun_feats_non_linear,shogun_feats_non_linear,400,2.5,2)
sigmoid_svm_non_linear = sg.machine('LibSVM', C1=sigmoid_c, C2=sigmoid_c, kernel=sigmoid_kernel_non_linear, labels=shogun_labels_non_linear)
sigmoid_svm_non_linear.train()
classifiers_non_linear.append(sigmoid_svm_non_linear)
plt.subplot(122)
plt.title("SVM Sigmoid Kernel - Non Linear Features")
plot_model(plt,sigmoid_svm_non_linear,feats_non_linear,labels_non_linear)
poly_c = 0.5
degree = 4
poly_kernel_linear = sg.kernel('PolyKernel', degree=degree, c=1.0)
poly_kernel_linear.init(shogun_feats_linear, shogun_feats_linear)
poly_svm_linear = sg.machine('LibSVM', C1=poly_c, C2=poly_c, kernel=poly_kernel_linear, labels=shogun_labels_linear)
poly_svm_linear.train()
classifiers_linear.append(poly_svm_linear)
classifiers_names.append("SVM Polynomial kernel")
fadings.append(True)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("SVM Polynomial Kernel - Linear Features")
plot_model(plt,poly_svm_linear,feats_linear,labels_linear)
poly_kernel_non_linear = sg.kernel('PolyKernel', degree=degree, c=1.0)
poly_kernel_non_linear.init(shogun_feats_non_linear, shogun_feats_non_linear)
poly_svm_non_linear = sg.machine('LibSVM', C1=poly_c, C2=poly_c, kernel=poly_kernel_non_linear, labels=shogun_labels_non_linear)
poly_svm_non_linear.train()
classifiers_non_linear.append(poly_svm_non_linear)
plt.subplot(122)
plt.title("SVM Polynomial Kernel - Non Linear Features")
plot_model(plt,poly_svm_non_linear,feats_non_linear,labels_non_linear)
multiclass_labels_linear = shogun_labels_linear.get('labels')
for i in range(0,len(multiclass_labels_linear)):
if multiclass_labels_linear[i] == -1:
multiclass_labels_linear[i] = 0
multiclass_labels_non_linear = shogun_labels_non_linear.get('labels')
for i in range(0,len(multiclass_labels_non_linear)):
if multiclass_labels_non_linear[i] == -1:
multiclass_labels_non_linear[i] = 0
shogun_multiclass_labels_linear = MulticlassLabels(multiclass_labels_linear)
shogun_multiclass_labels_non_linear = MulticlassLabels(multiclass_labels_non_linear)
naive_bayes_linear = GaussianNaiveBayes()
naive_bayes_linear.put('features', shogun_feats_linear)
naive_bayes_linear.put('labels', shogun_multiclass_labels_linear)
naive_bayes_linear.train()
classifiers_linear.append(naive_bayes_linear)
classifiers_names.append("Naive Bayes")
fadings.append(False)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("Naive Bayes - Linear Features")
plot_model(plt,naive_bayes_linear,feats_linear,labels_linear,fading=False)
naive_bayes_non_linear = GaussianNaiveBayes()
naive_bayes_non_linear.put('features', shogun_feats_non_linear)
naive_bayes_non_linear.put('labels', shogun_multiclass_labels_non_linear)
naive_bayes_non_linear.train()
classifiers_non_linear.append(naive_bayes_non_linear)
plt.subplot(122)
plt.title("Naive Bayes - Non Linear Features")
plot_model(plt,naive_bayes_non_linear,feats_non_linear,labels_non_linear,fading=False)
number_of_neighbors = 10
distances_linear = sg.distance('EuclideanDistance')
distances_linear.init(shogun_feats_linear, shogun_feats_linear)
knn_linear = KNN(number_of_neighbors,distances_linear,shogun_labels_linear)
knn_linear.train()
classifiers_linear.append(knn_linear)
classifiers_names.append("Nearest Neighbors")
fadings.append(False)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("Nearest Neighbors - Linear Features")
plot_model(plt,knn_linear,feats_linear,labels_linear,fading=False)
distances_non_linear = sg.distance('EuclideanDistance')
distances_non_linear.init(shogun_feats_non_linear, shogun_feats_non_linear)
knn_non_linear = KNN(number_of_neighbors,distances_non_linear,shogun_labels_non_linear)
knn_non_linear.train()
classifiers_non_linear.append(knn_non_linear)
plt.subplot(122)
plt.title("Nearest Neighbors - Non Linear Features")
plot_model(plt,knn_non_linear,feats_non_linear,labels_non_linear,fading=False)
gamma = 0.1
lda_linear = sg.machine('LDA', gamma=gamma, labels=shogun_labels_linear)
lda_linear.train(shogun_feats_linear)
classifiers_linear.append(lda_linear)
classifiers_names.append("LDA")
fadings.append(True)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("LDA - Linear Features")
plot_model(plt,lda_linear,feats_linear,labels_linear)
lda_non_linear = sg.machine('LDA', gamma=gamma, labels=shogun_labels_non_linear)
lda_non_linear.train(shogun_feats_non_linear)
classifiers_non_linear.append(lda_non_linear)
plt.subplot(122)
plt.title("LDA - Non Linear Features")
plot_model(plt,lda_non_linear,feats_non_linear,labels_non_linear)
qda_linear = QDA(shogun_feats_linear, shogun_multiclass_labels_linear)
qda_linear.train()
classifiers_linear.append(qda_linear)
classifiers_names.append("QDA")
fadings.append(False)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("QDA - Linear Features")
plot_model(plt,qda_linear,feats_linear,labels_linear,fading=False)
qda_non_linear = QDA(shogun_feats_non_linear, shogun_multiclass_labels_non_linear)
qda_non_linear.train()
classifiers_non_linear.append(qda_non_linear)
plt.subplot(122)
plt.title("QDA - Non Linear Features")
plot_model(plt,qda_non_linear,feats_non_linear,labels_non_linear,fading=False)
# create Gaussian kernel with width = 2.0
kernel = sg.kernel("GaussianKernel", log_width=np.log(2))
# create zero mean function
zero_mean = ZeroMean()
# create logit likelihood model
likelihood = LogitLikelihood()
# specify EP approximation inference method
inference_model_linear = EPInferenceMethod(kernel, shogun_feats_linear, zero_mean, shogun_labels_linear, likelihood)
# create and train GP classifier, which uses Laplace approximation
gaussian_logit_linear = GaussianProcessClassification(inference_model_linear)
gaussian_logit_linear.train()
classifiers_linear.append(gaussian_logit_linear)
classifiers_names.append("Gaussian Process Logit")
fadings.append(True)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("Gaussian Process - Logit - Linear Features")
plot_model(plt,gaussian_logit_linear,feats_linear,labels_linear)
inference_model_non_linear = EPInferenceMethod(kernel, shogun_feats_non_linear, zero_mean,
shogun_labels_non_linear, likelihood)
gaussian_logit_non_linear = GaussianProcessClassification(inference_model_non_linear)
gaussian_logit_non_linear.train()
classifiers_non_linear.append(gaussian_logit_non_linear)
plt.subplot(122)
plt.title("Gaussian Process - Logit - Non Linear Features")
plot_model(plt,gaussian_logit_non_linear,feats_non_linear,labels_non_linear)
likelihood = ProbitLikelihood()
inference_model_linear = EPInferenceMethod(kernel, shogun_feats_linear, zero_mean, shogun_labels_linear, likelihood)
gaussian_probit_linear = GaussianProcessClassification(inference_model_linear)
gaussian_probit_linear.train()
classifiers_linear.append(gaussian_probit_linear)
classifiers_names.append("Gaussian Process Probit")
fadings.append(True)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("Gaussian Process - Probit - Linear Features")
plot_model(plt,gaussian_probit_linear,feats_linear,labels_linear)
inference_model_non_linear = EPInferenceMethod(kernel, shogun_feats_non_linear,
zero_mean, shogun_labels_non_linear, likelihood)
gaussian_probit_non_linear = GaussianProcessClassification(inference_model_non_linear)
gaussian_probit_non_linear.train()
classifiers_non_linear.append(gaussian_probit_non_linear)
plt.subplot(122)
plt.title("Gaussian Process - Probit - Non Linear Features")
plot_model(plt,gaussian_probit_non_linear,feats_non_linear,labels_non_linear)
figure = plt.figure(figsize=(30,9))
plt.subplot(2,11,1)
plot_binary_data(plt,feats_linear, labels_linear)
for i in range(0,10):
plt.subplot(2,11,i+2)
plt.title(classifiers_names[i])
plot_model(plt,classifiers_linear[i],feats_linear,labels_linear,fading=fadings[i])
plt.subplot(2,11,12)
plot_binary_data(plt,feats_non_linear, labels_non_linear)
for i in range(0,10):
plt.subplot(2,11,13+i)
plot_model(plt,classifiers_non_linear[i],feats_non_linear,labels_non_linear,fading=fadings[i])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id = "section1">Data Generation and Visualization</a>
Step5: Data visualization methods.
Step6: <a id="section2" href="http
Step7: SVM - Kernels
Step8: <a id ="section2c" href="http
Step9: <a id ="section2d" href="http
Step10: <a id ="section3" href="http
Step11: <a id ="section4" href="http
Step12: <a id ="section5" href="http
Step13: <a id ="section6" href="http
Step14: <a id ="section7" href="http
Step15: <a id ="section7b">Probit Likelihood model</a>
Step16: <a id="section8">Putting It All Together</a>
|
10,243
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
my_dictionary = {'a' : 45., 'b' : -19.5, 'c' : 4444}
print(my_dictionary.keys())
print(my_dictionary.values())
cookbook_df = pd.DataFrame({'AAA' : [4,5,6,7], 'BBB' : [10,20,30,40],'CCC' : [100,50,-30,-50]})
cookbook_df
series_dict = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
series_df = pd.DataFrame(series_dict)
series_df
produce_dict = {'veggies': ['potatoes', 'onions', 'peppers', 'carrots'],
'fruits': ['apples', 'bananas', 'pineapple', 'berries']}
produce_dict
data2 = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]
pd.DataFrame(data2)
pd.DataFrame({('a', 'b'): {('A', 'B'): 1, ('A', 'C'): 2},
('a', 'a'): {('A', 'C'): 3, ('A', 'B'): 4},
('a', 'c'): {('A', 'B'): 5, ('A', 'C'): 6},
('b', 'a'): {('A', 'C'): 7, ('A', 'B'): 8},
('b', 'b'): {('A', 'D'): 9, ('A', 'B'): 10}})
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating data frames from various data types
Step2: constructor without explicit index
Step3: constructor contains dictionary with Series as values
Step4: dictionary of lists
Step5: list of dictionaries
Step6: dictionary of tuples, with multi index
|
10,244
|
<ASSISTANT_TASK:>
Python Code:
# we assume that we have the dynet module in your path.
# OUTDATED: we also assume that LD_LIBRARY_PATH includes a pointer to where libcnn_shared.so is.
from dynet import *
# create a parameter collection and add the parameters.
m = ParameterCollection()
pW = m.add_parameters((8,2))
pV = m.add_parameters((1,8))
pb = m.add_parameters((8))
renew_cg() # new computation graph. not strictly needed here, but good practice.
# associate the parameters with cg Expressions
W = parameter(pW)
V = parameter(pV)
b = parameter(pb)
#b[1:-1].value()
b.value()
x = vecInput(2) # an input vector of size 2. Also an expression.
output = logistic(V*(tanh((W*x)+b)))
# we can now query our network
x.set([0,0])
output.value()
# we want to be able to define a loss, so we need an input expression to work against.
y = scalarInput(0) # this will hold the correct answer
loss = binary_log_loss(output, y)
x.set([1,0])
y.set(0)
print loss.value()
y.set(1)
print loss.value()
trainer = SimpleSGDTrainer(m)
x.set([1,0])
y.set(1)
loss_value = loss.value() # this performs a forward through the network.
print "the loss before step is:",loss_value
# now do an optimization step
loss.backward() # compute the gradients
trainer.update()
# see how it affected the loss:
loss_value = loss.value(recalculate=True) # recalculate=True means "don't use precomputed value"
print "the loss after step is:",loss_value
def create_xor_instances(num_rounds=2000):
    """Generate XOR training instances.

    Args:
        num_rounds: how many times to repeat the four XOR input cases.

    Returns:
        (questions, answers): parallel lists where questions[i] is an
        (x1, x2) bit pair and answers[i] is x1 XOR x2.
    """
    questions = []
    answers = []
    # range() instead of xrange(): identical iteration, but works under
    # both Python 2 and Python 3 (xrange was removed in Python 3).
    for round in range(num_rounds):
        for x1 in 0, 1:
            for x2 in 0, 1:
                answer = 0 if x1 == x2 else 1
                questions.append((x1, x2))
                answers.append(answer)
    return questions, answers
questions, answers = create_xor_instances()
total_loss = 0
seen_instances = 0
for question, answer in zip(questions, answers):
x.set(question)
y.set(answer)
seen_instances += 1
total_loss += loss.value()
loss.backward()
trainer.update()
if (seen_instances > 1 and seen_instances % 100 == 0):
print "average loss is:",total_loss / seen_instances
x.set([0,1])
print "0,1",output.value()
x.set([1,0])
print "1,0",output.value()
x.set([0,0])
print "0,0",output.value()
x.set([1,1])
print "1,1",output.value()
W.value()
V.value()
b.value()
# define the parameters
m = ParameterCollection()
pW = m.add_parameters((8,2))
pV = m.add_parameters((1,8))
pb = m.add_parameters((8))
# renew the computation graph
renew_cg()
# add the parameters to the graph
W = parameter(pW)
V = parameter(pV)
b = parameter(pb)
# create the network
x = vecInput(2) # an input vector of size 2.
output = logistic(V*(tanh((W*x)+b)))
# define the loss with respect to an output y.
y = scalarInput(0) # this will hold the correct answer
loss = binary_log_loss(output, y)
# create training instances
def create_xor_instances(num_rounds=2000):
    """Generate XOR training instances.

    Args:
        num_rounds: how many times to repeat the four XOR input cases.

    Returns:
        (questions, answers): parallel lists where questions[i] is an
        (x1, x2) bit pair and answers[i] is x1 XOR x2.
    """
    questions = []
    answers = []
    # range() instead of xrange(): identical iteration, but works under
    # both Python 2 and Python 3 (xrange was removed in Python 3).
    for round in range(num_rounds):
        for x1 in 0, 1:
            for x2 in 0, 1:
                answer = 0 if x1 == x2 else 1
                questions.append((x1, x2))
                answers.append(answer)
    return questions, answers
questions, answers = create_xor_instances()
# train the network
trainer = SimpleSGDTrainer(m)
total_loss = 0
seen_instances = 0
for question, answer in zip(questions, answers):
x.set(question)
y.set(answer)
seen_instances += 1
total_loss += loss.value()
loss.backward()
trainer.update()
if (seen_instances > 1 and seen_instances % 100 == 0):
print "average loss is:",total_loss / seen_instances
from dynet import *
# create training instances, as before
def create_xor_instances(num_rounds=2000):
    """Generate XOR training instances.

    Args:
        num_rounds: how many times to repeat the four XOR input cases.

    Returns:
        (questions, answers): parallel lists where questions[i] is an
        (x1, x2) bit pair and answers[i] is x1 XOR x2.
    """
    questions = []
    answers = []
    # range() instead of xrange(): identical iteration, but works under
    # both Python 2 and Python 3 (xrange was removed in Python 3).
    for round in range(num_rounds):
        for x1 in 0, 1:
            for x2 in 0, 1:
                answer = 0 if x1 == x2 else 1
                questions.append((x1, x2))
                answers.append(answer)
    return questions, answers
questions, answers = create_xor_instances()
# create a network for the xor problem given input and output
def create_xor_network(pW, pV, pb, inputs, expected_answer):
    """Build a fresh computation graph for one XOR example and return its loss.

    Dynamic-network style: a brand-new graph is created per call, the
    persistent parameters are attached to it as expressions, and the binary
    log loss of the one-hidden-layer network against expected_answer is
    returned.
    """
    renew_cg()  # start a new computation graph for this example
    # Attach the persistent parameters to the current graph.
    W, V, b = parameter(pW), parameter(pV), parameter(pb)
    # Feed the input vector and the gold answer into the graph.
    x = vecInput(len(inputs))
    x.set(inputs)
    y = scalarInput(expected_answer)
    # One tanh hidden layer followed by a logistic output unit.
    prediction = logistic(V * (tanh((W * x) + b)))
    return binary_log_loss(prediction, y)
m2 = ParameterCollection()
pW = m2.add_parameters((8,2))
pV = m2.add_parameters((1,8))
pb = m2.add_parameters((8))
trainer = SimpleSGDTrainer(m2)
seen_instances = 0
total_loss = 0
for question, answer in zip(questions, answers):
loss = create_xor_network(pW, pV, pb, question, answer)
seen_instances += 1
total_loss += loss.value()
loss.backward()
trainer.update()
if (seen_instances > 1 and seen_instances % 100 == 0):
print "average loss is:",total_loss / seen_instances
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The first block creates a parameter collection and populates it with parameters.
Step2: Training
Step3: To use the trainer, we need to
Step4: The optimization step indeed made the loss decrease. We now need to run this in a loop.
Step5: We now feed each question / answer pair to the network, and try to minimize the loss.
Step6: Our network is now trained. Let's verify that it indeed learned the xor function
Step7: In case we are curious about the parameter values, we can query them
Step8: To summarize
Step9: Dynamic Networks
|
10,245
|
<ASSISTANT_TASK:>
Python Code:
nb_name = "DCAL_Water_WOFS"
# Enable importing of utilities.
import sys
import os
sys.path.append(os.environ.get('NOTEBOOK_ROOT'))
import numpy as np
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# Load Data Cube Configuration
import datacube
import utils.data_cube_utilities.data_access_api as dc_api
api = dc_api.DataAccessApi()
dc = api.dc
# Get available products
products_info = dc.list_products()
# List LANDSAT 7 products
print("LANDSAT 7 Products:")
products_info[["platform", "name"]][products_info.platform == "LANDSAT_7"]
# List LANDSAT 8 products
print("LANDSAT 8 Products:")
products_info[["platform", "name"]][products_info.platform == "LANDSAT_8"]
# CHANGE HERE >>>>>>>>>>>>>>>>>
# Select a Product and Platform
product = 'ls8_usgs_sr_scene'
platform = 'LANDSAT_8'
collection = 'c1'
level = 'l2'
from utils.data_cube_utilities.dc_load import get_product_extents
from utils.data_cube_utilities.dc_time import dt_to_str
full_lat, full_lon, min_max_dates = get_product_extents(api, platform, product)
# Print the extents of the combined data.
print("Latitude Extents:", full_lat)
print("Longitude Extents:", full_lon)
print("Time Extents:", list(map(dt_to_str, min_max_dates)))
from utils.data_cube_utilities.dc_display_map import display_map
display_map(full_lat, full_lon)
# Select an analysis region (Lat-Lon) within the extents listed above.
# Be sure you check whether you are using L7 or L8 as the time extents are very different
# HINT: Keep your region small (<0.5 deg square) to avoid memory overload issues
# Select a time period (Min-Max) within the extents listed above (Year-Month-Day)
# This region and time period will be used for the water assessment
# Mombasa, Kenya
latitude_extents = (-4.0475, -3.9574)
longitude_extents = (39.6028, 39.6792)
time_extents = ('2015-01-01', '2016-12-31')
# L. Tanganyika, Tanzania
# latitude_extents = (-8.8901, -3.3042)
# longitude_extents = (29.0069, 31.2510)
# time_extents = ('2016-01-01', '2017-01-01')
# L. Turkana, Kenya
# latitude_extents = ( 2.3248, 4.6859)
# longitude_extents = (35.7751, 36.7639)
# time_extents = ('2000-01-01', '2015-01-01')
# Ndakaini Dam, Kenya (provides water to Nairobi)
# latitude_extents = (-0.8269, -0.8090)
# longitude_extents = (36.8192, 36.8529)
# time_extents = ('2000-01-01', '2018-01-01')
display_map(latitude = latitude_extents, longitude = longitude_extents)
# Perform an empty load to create a blank dataset with correct dimensions.
landsat_dataset = dc.load(
platform = platform,
product = product,
latitude = latitude_extents,
longitude = longitude_extents,
time = time_extents,
measurements = [], # Do not load any measurements.
)
print(landsat_dataset)
ds_times = landsat_dataset.time # Save the acquisition dates for later
landsat_dataset = landsat_dataset.drop('time') # Drop time dimension
# Create a new band which will be used to store total number of NON-CLOUD (land+water) pixels
landsat_dataset = landsat_dataset.assign({
'count_total':
xr.DataArray(
np.zeros([d for d in landsat_dataset.dims.values()], dtype=np.uint16),
dims=landsat_dataset.dims,
coords=landsat_dataset.coords,
),
})
# Create a new band which will be used to store total number of WATER pixels (subset of NON-CLOUD)
landsat_dataset = landsat_dataset.assign({
'count_water':
xr.DataArray(
np.zeros([d for d in landsat_dataset.dims.values()], dtype=np.uint16),
dims=landsat_dataset.dims,
coords=landsat_dataset.coords,
),
})
# Remove some attributes to allow saving of progress to NetCDF files.
del landsat_dataset.attrs['crs'], ds_times.attrs['units']
landsat_dataset
# This function just outputs a progress bar so I know things are still moving...
def progbar(progress, start_time=None, start_progress=0, msg='Progress:', bar_length=50):
    """Render a text progress bar (with optional ETA) and return the current time.

    Args:
        progress: fraction complete in [0, 1]; non-numeric values count as 0.
        start_time: UTC datetime when processing began; enables the ETA display.
        start_progress: fraction already complete at start_time.
        msg: label printed before the bar.
        bar_length: width of the bar in characters.

    Returns:
        datetime.datetime: the current UTC time (callers pass it back in as
        start_time on subsequent calls).
    """
    import datetime
    # clear_output exists only inside IPython/Jupyter; degrade gracefully
    # to plain prints when running outside a notebook.
    try:
        from IPython.display import clear_output
    except ImportError:
        def clear_output(wait=False):
            pass
    # Clamp progress into [0, 1]; treat anything non-numeric as 0.
    if not isinstance(progress, (float, int)):
        progress = 0
    elif progress < 0:
        progress = 0
    elif progress >= 1:
        progress = 1
    # ETA: extrapolate from the rate observed since start_time.
    # Guard progress > start_progress (not just > 0) to avoid a
    # ZeroDivisionError when no progress has been made since start_time.
    if progress > start_progress and isinstance(start_time, datetime.datetime):
        cur_time = datetime.datetime.utcnow()
        per_unit_dur = (cur_time - start_time) / (progress - start_progress)
        eta_dur = per_unit_dur * (1 - progress)
        eta_time = cur_time + eta_dur
        # total_seconds() (not .seconds, which drops whole days) so ETAs
        # longer than 24 hours are reported correctly.
        time_str = "ETA: {eta} ({mins:.2f} min)".format(
            eta=str(eta_time.isoformat(' ', timespec='seconds')),
            mins=eta_dur.total_seconds() / 60,
        )
    else:
        time_str = ""
    # Print it out
    blocks = int(round(bar_length * progress))
    clear_output(wait=True)
    print("{msg} [{bar}] {pct:.1f}% {time_str}".format(
        bar="#" * blocks + "-" * (bar_length - blocks),
        pct=progress * 100, msg=msg, time_str=time_str))
    return datetime.datetime.utcnow()  # Return current time
from utils.data_cube_utilities.clean_mask import landsat_qa_clean_mask
from utils.data_cube_utilities.dc_water_classifier import wofs_classify
from utils.data_cube_utilities.dc_load import is_dataset_empty
import os
import pickle
# Create a DataArray to hold the per-acquisition water percentages.
timeslice_water_pct = xr.DataArray(np.full(len(ds_times), np.nan), name='timeslice_water_pct',
dims=ds_times.dims, coords=ds_times.coords)
# Reset counters to zero (just in case we run this cell multiple times for some reason).
landsat_dataset.count_water.values*=0
landsat_dataset.count_total.values*=0
# Create geographic chunks.
from utils.data_cube_utilities.dc_chunker import create_geographic_chunks
geographic_chunks = create_geographic_chunks(longitude_extents, latitude_extents, 0.1)
tmp_dir = 'tmp/{}'.format(nb_name)
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
# Check if we are continuing the previous task (parameter set).
params_filepath = '{}/params.pkl'.format(tmp_dir)
full_params = dict(platform=platform, product=product,
latitude=latitude_extents, longitude=longitude_extents, time=time_extents,
measurements=['red', 'green', 'blue', 'nir', 'swir1', 'swir2', 'pixel_qa'])
continuing_task = False
if os.path.exists(params_filepath):
with open(params_filepath, 'rb') as params_file:
old_params = pickle.load(params_file)
if old_params == full_params:
continuing_task = True
progress_raster_file = "{}/progress_raster.nc".format(tmp_dir)
progress_time_ind_file = "{}/time_ind.pk".format(tmp_dir)
progress_timeslice_water_pct_file = "{}/timeslice_water.nc".format(tmp_dir)
# If the parameter set is new, record it and remove old progress files.
if not continuing_task:
for root, dirs, files in os.walk(tmp_dir):
for file in files:
os.remove(os.path.join(root, file))
with open(params_filepath, 'wb') as params_file:
pickle.dump(full_params, params_file)
parameters = full_params.copy()
del parameters['time']
# Load the progress if any exists.
if os.path.exists(progress_raster_file):
with xr.open_dataset(progress_raster_file) as raster_raster_file_handle:
landsat_dataset = raster_raster_file_handle.load()
if os.path.exists(progress_time_ind_file):
with open(progress_time_ind_file, 'rb') as pickle_file:
last_completed_time_ind = pickle.load(pickle_file)
else:
last_completed_time_ind = -1
if os.path.exists(progress_timeslice_water_pct_file):
with xr.open_dataset(progress_timeslice_water_pct_file) as progress_timeslice_water_pct_file_handle:
timeslice_water_pct = progress_timeslice_water_pct_file_handle.load().timeslice_water_pct
# Loop over all the time slices, processing each one.
start_progress = (last_completed_time_ind+1)/len(ds_times)
start_time = progbar(start_progress)
for time_ind,t in enumerate(ds_times.values):
if time_ind <= last_completed_time_ind:
continue
parameters.update(dict(time=str(t)))
timeslice_total_cloud = 0
timeslice_total_water = 0
# Process each geographic chunk for this time slice.
for geo_chunk_ind, geographic_chunk in enumerate(geographic_chunks):
if geo_chunk_ind == 0:
print("Processing geographic chunks.")
elif geo_chunk_ind % 5 == 0:
print("Processed {:.3%} of all geographic chunks.".format(geo_chunk_ind / len(geographic_chunks)))
landsat_slice_dict = dict(latitude=slice(*geographic_chunk['latitude'][::-1]),
longitude=slice(*geographic_chunk['longitude']))
geo_coords = landsat_dataset.sel(landsat_slice_dict).coords
# Get the actual geographic extents of this data.
geographic_chunk['latitude'] = geo_coords['latitude'][[0,-1]].values[::-1]
geographic_chunk['longitude'] = geo_coords['longitude'][[0,-1]].values
landsat_slice_dict = dict(latitude=slice(*geographic_chunk['latitude'][::-1]),
longitude=slice(*geographic_chunk['longitude']))
parameters.update(geographic_chunk)
landsat_chunk = dc.load(**parameters)
if is_dataset_empty(landsat_chunk):
continue
landsat_chunk = landsat_chunk.squeeze('time')
# Mask Clouds
cloud_mask = landsat_qa_clean_mask(landsat_chunk, platform=platform,
collection=collection, level=level)
timeslice_total_cloud += cloud_mask.sum()
# Add to total cloud-free pixel count.
chunk_count_total = landsat_dataset.count_total.sel(landsat_slice_dict)
chunk_count_total.values += cloud_mask.values
# Classify Water
water_mask = wofs_classify(landsat_chunk, clean_mask = cloud_mask.values, mosaic=True, no_data=0).wofs.astype(np.bool)
timeslice_total_water += water_mask.sum()
# Add to total water pixel count
chunk_count_water = landsat_dataset.count_water.sel(landsat_slice_dict)
chunk_count_water.values += water_mask.values
# Compute percent of cloud-free pixels that are water for this timeslice.
timeslice_water_pct[time_ind] = timeslice_total_water / timeslice_total_cloud
# Save progress.
if os.path.exists(progress_raster_file):
os.remove(progress_raster_file)
landsat_dataset.to_netcdf(progress_raster_file)
if os.path.exists(progress_time_ind_file):
os.remove(progress_time_ind_file)
with open(progress_time_ind_file, 'wb') as pickle_file:
pickle.dump(time_ind, pickle_file)
if os.path.exists(progress_timeslice_water_pct_file):
os.remove(progress_timeslice_water_pct_file)
timeslice_water_pct.to_netcdf(progress_timeslice_water_pct_file)
# Print progress bar
progbar((time_ind+1)/len(ds_times), start_time, start_progress)
# Plot time slice water percentages (except when all values are NaN, which causes an error).
if ~np.all(np.isnan(timeslice_water_pct)):
timeslice_water_pct.plot(figsize=(12,2), marker='o', linestyle='None')
# plt.gca().set_yscale('log') # Change scaling of the Y axis to logarithmic
plt.xlim(timeslice_water_pct.time.values[[0,-1]])
plt.ylim(0, 1)
plt.show()
# Draw map as we go
# landsat_dataset_low_res = xr_scale_res(landsat_dataset, frac_res=0.01)
# plt.imshow(landsat_dataset_low_res.count_water / landsat_dataset_low_res.count_total,
# aspect='equal')
# plt.show()
progbar(1)
landsat_dataset
# import color-scheme and set nans to black
from matplotlib.cm import jet_r as jet_r
import copy
jet_r = copy.copy(jet_r) # Copy to modify.
jet_r.set_bad('black',1)
# Here we plot the product we have created
# Areas of RED have experienced little or no water over the time series
# Areas of BLUE have experience significant or constant water over the time series
tmp = landsat_dataset.count_water / landsat_dataset.count_total
fig,ax = plt.subplots(figsize=(14,14))
plt.imshow(tmp, cmap = jet_r, aspect="equal", extent=(
landsat_dataset.longitude.min(),
landsat_dataset.longitude.max(),
landsat_dataset.latitude.min(),
landsat_dataset.latitude.max(),
))
plt.colorbar()
plt.show()
# Here we plot the percentage of valid (non-cloudy) pixels that were identified as water for each time slice
timeslice_water_pct.plot(figsize=(15,3), marker='o', linestyle='None')
plt.show()
# It is often helpful to use a logarithmic scale when viewing these types of plots, as the "interesting"
# behavior tends to be clustered near zero and large outliers can hide small trends.
# The result below can be compared to the result above.
timeslice_water_pct.plot(figsize=(15,3), marker='o', linestyle='None')
plt.gca().set_yscale('log') # Change scaling of the Y axis to logarithmic
plt.show()
# Save the water percentage image to a GeoTIFF.
from utils.data_cube_utilities.import_export import export_slice_to_geotiff
dataset_to_export = xr.Dataset(coords=landsat_dataset.coords, attrs=landsat_dataset.attrs)
dataset_to_export['wofs_pct'] = tmp.astype(np.float32).where(landsat_dataset.count_total>0)
# The export command below is commented out to avoid overwriting files.
# If you would like to export data, please check the file path before uncommenting the line
# to ensure no files are accidentally lost.
output_dir = 'output/geotiffs'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
export_slice_to_geotiff(dataset_to_export, output_dir + '/WOFS_Percentage.tif')
!ls -lah output/geotiffs/WOFS_Percentage.tif
!gdalinfo output/geotiffs/WOFS_Percentage.tif
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <span id="import">Import Dependencies and Connect to the Data Cube ▴</span>
Step2: <span id="plat_prod">Choose Platforms and Products ▴</span>
Step3: Choose products
Step4: <span id="extents">Get the Extents of the Cube ▴</span>
Step5: Visualize the available area
Step6: <span id="define_extents">Define the Extents of the Analysis ▴</span>
Step7: Visualize the selected area
Step8: <span id="load_data">Load and Clean Data from the Data Cube ▴</span>
Step9: <span id="time_series_water">Time Series Water Detection Analysis ▴</span>
Step10: <span id="export">Create GeoTIFF Output Products ▴</span>
Step11: <p style="color
|
10,246
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
Image('../data/scatter_plot.png')
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 6.0)
from sklearn.datasets.samples_generator import make_blobs
X, y = blobs = make_blobs(n_samples=500, centers=5, cluster_std=1.5, random_state=8)
plt.scatter(X[:,0], X[:,1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Of course, the first thing we need is the data. Usually this data will come from your experiments or your computations, but here we are going to generate it. It's always good to be able to generate some fake data to see if our algorithms will work.
Step2: Start with the default options for scatter and then change the code until you get as close as possible to the figure above.
|
10,247
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import shutil
import os
import tensorflow as tf
print(tf.__version__)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("mnist/data", one_hot = True, reshape = False)
print(mnist.train.images.shape)
print(mnist.train.labels.shape)
HEIGHT = 28
WIDTH = 28
NCLASSES = 10
import matplotlib.pyplot as plt
IMGNO = 12
plt.imshow(mnist.test.images[IMGNO].reshape(HEIGHT, WIDTH));
def linear_model(img):
    """Compute logits for a linear (single dense layer) MNIST classifier.

    Args:
        img: float tensor of input images; assumed shape is
            [batch, HEIGHT, WIDTH] or [batch, HEIGHT, WIDTH, 1] -- TODO
            confirm against the input_fn feeding this model.

    Returns:
        Tuple of (ylogits, NCLASSES): unscaled class scores of shape
        [batch, NCLASSES], and the number of classes.
    """
    # Flatten each image into one feature vector, then apply a single
    # affine transform (no activation) to produce per-class logits.
    # The original body was a TODO stub that returned an undefined name.
    X = tf.reshape(img, [-1, HEIGHT * WIDTH])
    ylogits = tf.layers.dense(X, NCLASSES, activation=None)
    return ylogits, NCLASSES
# Training input function: shuffled, repeating indefinitely (num_epochs=None)
# so the number of training steps is controlled by the TrainSpec instead.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x = {"image": mnist.train.images},
    y = mnist.train.labels,
    batch_size = 100,
    num_epochs = None,
    shuffle = True,
    queue_capacity = 5000
)
# Evaluation input function: exactly one ordered pass over the held-out test
# set so metrics cover every example once. (Original left this as a TODO,
# which made the numpy_input_fn call invalid.)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    x = {"image": mnist.test.images},
    y = mnist.test.labels,
    batch_size = 100,
    num_epochs = 1,
    shuffle = False,
    queue_capacity = 5000
)
def serving_input_fn():
    """Serving-time input receiver: a batch of HEIGHT x WIDTH float images.

    The received tensors are passed through unchanged as the model features
    (no preprocessing at serving time).
    """
    image_placeholder = tf.placeholder(dtype=tf.float32, shape=[None, HEIGHT, WIDTH])
    receiver_tensors = {"image": image_placeholder}
    # Features are the raw inputs, as-is.
    return tf.estimator.export.ServingInputReceiver(
        features=receiver_tensors, receiver_tensors=receiver_tensors)
def image_classifier(features, labels, mode, params):
    """Custom Estimator model_fn for MNIST classification.

    Args:
        features: dict with key "image" holding the input image batch.
        labels: one-hot label batch (present only in TRAIN/EVAL modes).
        mode: a tf.estimator.ModeKeys value (TRAIN, EVAL, or PREDICT).
        params: hyperparameter dict; reads "learning_rate".

    Returns:
        tf.estimator.EstimatorSpec wired for the requested mode.
    """
    ylogits, nclasses = linear_model(features["image"])
    # Convert logits to class probabilities and hard class-id predictions.
    probabilities = tf.nn.softmax(logits = ylogits)
    class_ids = tf.cast(x = tf.argmax(input = probabilities, axis = 1), dtype = tf.uint8)
    if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
        # Cross-entropy loss against the one-hot labels.
        loss = tf.reduce_mean(input_tensor = tf.nn.softmax_cross_entropy_with_logits_v2(logits = ylogits, labels = labels))
        if mode == tf.estimator.ModeKeys.TRAIN:
            # TRAIN: attach an Adam optimizer; eval metrics are not computed.
            train_op = tf.contrib.layers.optimize_loss(
                loss = loss,
                global_step = tf.train.get_global_step(),
                learning_rate = params["learning_rate"],
                optimizer = "Adam")
            eval_metric_ops = None
        else:
            # EVAL: no training op; report accuracy of the predicted class ids.
            train_op = None
            eval_metric_ops = {"accuracy": tf.metrics.accuracy(labels = tf.argmax(input = labels, axis = 1), predictions = class_ids)}
    else:
        # PREDICT: no labels available, so no loss, training op, or metrics.
        loss = None
        train_op = None
        eval_metric_ops = None
    return tf.estimator.EstimatorSpec(
        mode = mode,
        predictions = {"probabilities": probabilities, "class_ids": class_ids},
        loss = loss,
        train_op = train_op,
        eval_metric_ops = eval_metric_ops,
        export_outputs = {"predictions": tf.estimator.export.PredictOutput({"probabilities": probabilities, "class_ids": class_ids})}
    )
def train_and_evaluate(output_dir, hparams):
    """Run (distributed-ready) training and evaluation for the MNIST model.

    Args:
        output_dir: checkpoint/export directory for the Estimator.
        hparams: dict with at least "train_steps" and "learning_rate".
    """
    # Custom Estimator backed by image_classifier, checkpointing to output_dir.
    estimator = tf.estimator.Estimator(
        model_fn = image_classifier,
        model_dir = output_dir,
        params = hparams)
    # Export the latest checkpoint in servable form after each evaluation.
    exporter = tf.estimator.LatestExporter(
        name = "exporter", serving_input_receiver_fn = serving_input_fn)
    train_spec = tf.estimator.TrainSpec(
        input_fn = train_input_fn,
        max_steps = hparams["train_steps"])
    eval_spec = tf.estimator.EvalSpec(
        input_fn = eval_input_fn,
        steps = None,
        exporters = exporter)
    tf.estimator.train_and_evaluate(
        estimator = estimator, train_spec = train_spec, eval_spec = eval_spec)
OUTDIR = "mnist/learned"
shutil.rmtree(path = OUTDIR, ignore_errors = True) # start fresh each time
hparams = {"train_steps": 1000, "learning_rate": 0.01}
train_and_evaluate(OUTDIR, hparams)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exploring the data
Step2: Define the model.
Step3: Write Input Functions
Step4: Write Custom Estimator
Step5: tf.estimator.train_and_evaluate does distributed training.
Step6: This is the main() function
|
10,248
|
<ASSISTANT_TASK:>
Python Code:
abbr = 'NLP'
full_text = 'Natural Language Processing'
# Enter your code here:
print(f'{abbr} stands for {full_text}')
%%writefile contacts.txt
First_Name Last_Name, Title, Extension, Email
# Write your code here:
with open('contacts.txt') as c:
fields = c.read()
# Run fields to see the contents of contacts.txt:
fields
# Perform import
import PyPDF2
# Open the file as a binary object
f = open('Business_Proposal.pdf','rb')
# Use PyPDF2 to read the text of the file
pdf_reader = PyPDF2.PdfFileReader(f)
# Get the text from page 2 (CHALLENGE: Do this in one step!)
page_two_text = pdf_reader.getPage(1).extractText()
# Close the file
f.close()
# Print the contents of page_two_text
print(page_two_text)
# Simple Solution:
with open('contacts.txt','a+') as c:
c.write(page_two_text)
c.seek(0)
print(c.read())
# CHALLENGE Solution (re-run the %%writefile cell above to obtain an unmodified contacts.txt file):
with open('contacts.txt','a+') as c:
c.write(page_two_text[8:])
c.seek(0)
print(c.read())
import re
# Enter your regex pattern here. This may take several tries!
pattern = r'\w+@\w+.\w{3}'
re.findall(pattern, page_two_text)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Files
Step2: 3. Open the file and use .read() to save the contents of the file to a string called fields. Make sure the file is closed at the end.
Step3: Working with PDF Files
Step4: 5. Open the file contacts.txt in append mode. Add the text of page 2 from above to contacts.txt.
Step5: Regular Expressions
|
10,249
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install -U google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
# Text Dataset type
DATA_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/metadata/text_1.0.0.yaml"
# Text Labeling type
LABEL_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/ioformat/text_classification_multi_label_io_format_1.0.0.yaml"
# Text Training task
TRAINING_SCHEMA = "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_text_classification_1.0.0.yaml"
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_dataset_client():
    """Return a DatasetServiceClient bound to the regional API endpoint."""
    return aip.DatasetServiceClient(client_options=client_options)
def create_model_client():
    """Return a ModelServiceClient bound to the regional API endpoint."""
    return aip.ModelServiceClient(client_options=client_options)
def create_pipeline_client():
    """Return a PipelineServiceClient bound to the regional API endpoint."""
    return aip.PipelineServiceClient(client_options=client_options)
def create_job_client():
    """Return a JobServiceClient bound to the regional API endpoint."""
    return aip.JobServiceClient(client_options=client_options)
clients = {}
clients["dataset"] = create_dataset_client()
clients["model"] = create_model_client()
clients["pipeline"] = create_pipeline_client()
clients["job"] = create_job_client()
for client in clients.items():
print(client)
TIMEOUT = 90
def create_dataset(name, schema, labels=None, timeout=TIMEOUT):
    """Create a Vertex AI dataset and wait for the operation to complete.

    Args:
        name: display name for the new dataset.
        schema: metadata schema URI describing the dataset type.
        labels: optional dict of resource labels.
        timeout: seconds to wait for the long-running create to finish.

    Returns:
        The created Dataset resource, or None if creation failed.
    """
    start_time = time.time()
    try:
        dataset = aip.Dataset(
            display_name=name, metadata_schema_uri=schema, labels=labels
        )
        operation = clients["dataset"].create_dataset(parent=PARENT, dataset=dataset)
        print("Long running operation:", operation.operation.name)
        # Bug fix: honor the caller-supplied timeout (the original passed the
        # global TIMEOUT here, silently ignoring the timeout parameter).
        result = operation.result(timeout=timeout)
        print("time:", time.time() - start_time)
        print("response")
        print(" name:", result.name)
        print(" display_name:", result.display_name)
        print(" metadata_schema_uri:", result.metadata_schema_uri)
        print(" metadata:", dict(result.metadata))
        print(" create_time:", result.create_time)
        print(" update_time:", result.update_time)
        print(" etag:", result.etag)
        print(" labels:", dict(result.labels))
        return result
    except Exception as e:
        # Best-effort notebook style: report and return None rather than raise.
        print("exception:", e)
        return None
result = create_dataset("mcdonalds-" + TIMESTAMP, DATA_SCHEMA)
# The full unique ID for the dataset
dataset_id = result.name
# The short numeric ID for the dataset
dataset_short_id = dataset_id.split("/")[-1]
print(dataset_id)
IMPORT_FILE = "gs://ucaip-test-us-central1/dataset/ucaip_multi_tcn_dataset.csv"
if "IMPORT_FILES" in globals():
FILE = IMPORT_FILES[0]
else:
FILE = IMPORT_FILE
count = ! gsutil cat $FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $FILE | head
def import_data(dataset, gcs_sources, schema):
    """Import data files into an existing Vertex AI dataset (blocking).

    Args:
        dataset: full resource name of the dataset to import into.
        gcs_sources: list of gs:// URIs of the files to import.
        schema: import schema URI describing the label/IO format.

    Returns:
        The completed operation, or None on failure.
    """
    config = [{"gcs_source": {"uris": gcs_sources}, "import_schema_uri": schema}]
    # Bug fix: use the dataset argument. The original read the module-level
    # dataset_id global here and in the API call, silently ignoring the
    # parameter (harmless for the one existing caller, wrong for any other).
    print("dataset:", dataset)
    start_time = time.time()
    try:
        operation = clients["dataset"].import_data(
            name=dataset, import_configs=config
        )
        print("Long running operation:", operation.operation.name)
        result = operation.result()
        print("result:", result)
        print("time:", int(time.time() - start_time), "secs")
        print("error:", operation.exception())
        print("meta :", operation.metadata)
        print(
            "after: running:",
            operation.running(),
            "done:",
            operation.done(),
            "cancelled:",
            operation.cancelled(),
        )
        return operation
    except Exception as e:
        # Best-effort notebook style: report and return None rather than raise.
        print("exception:", e)
        return None
import_data(dataset_id, [IMPORT_FILE], LABEL_SCHEMA)
def create_pipeline(pipeline_name, model_name, dataset, schema, task):
    """Create a Vertex AI training pipeline and return it.

    Args:
        pipeline_name: display name for the training pipeline.
        model_name: display name for the model the pipeline uploads.
        dataset: dataset resource name (only the trailing ID is used).
        schema: training task definition schema URI.
        task: training task inputs (a protobuf Value).

    Returns:
        The created TrainingPipeline, or None if creation failed.
    """
    # The input-data config wants only the short numeric dataset ID.
    short_id = dataset.split("/")[-1]
    split_config = {
        "training_fraction": 0.8,
        "validation_fraction": 0.1,
        "test_fraction": 0.1,
    }
    spec = {
        "display_name": pipeline_name,
        "training_task_definition": schema,
        "training_task_inputs": task,
        "input_data_config": {
            "dataset_id": short_id,
            "fraction_split": split_config,
        },
        "model_to_upload": {"display_name": model_name},
    }
    try:
        pipeline = clients["pipeline"].create_training_pipeline(
            parent=PARENT, training_pipeline=spec
        )
        print(pipeline)
    except Exception as e:
        print("exception:", e)
        return None
    return pipeline
PIPE_NAME = "mcdonalds_pipe-" + TIMESTAMP
MODEL_NAME = "mcdonalds_model-" + TIMESTAMP
task = json_format.ParseDict(
{
"multi_label": True,
},
Value(),
)
response = create_pipeline(PIPE_NAME, MODEL_NAME, dataset_id, TRAINING_SCHEMA, task)
# The full unique ID for the pipeline
pipeline_id = response.name
# The short numeric ID for the pipeline
pipeline_short_id = pipeline_id.split("/")[-1]
print(pipeline_id)
def get_training_pipeline(name, silent=False):
    """Retrieve a training pipeline; print its fields unless silent.

    Args:
        name: fully qualified training pipeline resource name.
        silent: when True, skip printing and just return the response.

    Returns:
        The TrainingPipeline response object.
    """
    response = clients["pipeline"].get_training_pipeline(name=name)
    if silent:
        return response
    print("pipeline")
    fields = (
        (" name:", response.name),
        (" display_name:", response.display_name),
        (" state:", response.state),
        (" training_task_definition:", response.training_task_definition),
        (" training_task_inputs:", dict(response.training_task_inputs)),
        (" create_time:", response.create_time),
        (" start_time:", response.start_time),
        (" end_time:", response.end_time),
        (" update_time:", response.update_time),
        (" labels:", dict(response.labels)),
    )
    for label, value in fields:
        print(label, value)
    return response
response = get_training_pipeline(pipeline_id)
while True:
response = get_training_pipeline(pipeline_id, True)
if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED:
print("Training job has not completed:", response.state)
model_to_deploy_id = None
if response.state == aip.PipelineState.PIPELINE_STATE_FAILED:
raise Exception("Training Job Failed")
else:
model_to_deploy = response.model_to_upload
model_to_deploy_id = model_to_deploy.name
print("Training Time:", response.end_time - response.start_time)
break
time.sleep(60)
print("model to deploy:", model_to_deploy_id)
def list_model_evaluations(name):
    """Print every evaluation of a model; return the last one's resource name.

    Args:
        name: fully qualified model resource name.

    Returns:
        Resource name of the last evaluation listed.
    """
    evaluations = clients["model"].list_model_evaluations(parent=name)
    for ev in evaluations:
        print("model_evaluation")
        print(" name:", ev.name)
        print(" metrics_schema_uri:", ev.metrics_schema_uri)
        metrics = json_format.MessageToDict(ev._pb.metrics)
        for key in metrics:
            print(key)
        print("logloss", metrics["logLoss"])
        print("auPrc", metrics["auPrc"])
    # NOTE(review): raises NameError if the model has no evaluations — confirm
    # callers only invoke this after a successful training run.
    return ev.name
last_evaluation = list_model_evaluations(model_to_deploy_id)
test_items = ! gsutil cat $IMPORT_FILE | head -n2
cols_1 = str(test_items[0]).split(",")
cols_2 = str(test_items[1]).split(",")
test_item_1 = cols_1[0]
test_label_1 = cols_1[1:]
test_item_2 = cols_2[0]
test_label_2 = cols_2[1:]
print(test_item_1, test_label_1)
print(test_item_2, test_label_2)
import json
import tensorflow as tf
gcs_test_item_1 = BUCKET_NAME + "/test1.txt"
with tf.io.gfile.GFile(gcs_test_item_1, "w") as f:
f.write(test_item_1 + "\n")
gcs_test_item_2 = BUCKET_NAME + "/test2.txt"
with tf.io.gfile.GFile(gcs_test_item_2, "w") as f:
f.write(test_item_2 + "\n")
gcs_input_uri = BUCKET_NAME + "/test.jsonl"
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
data = {"content": gcs_test_item_1, "mime_type": "text/plain"}
f.write(json.dumps(data) + "\n")
data = {"content": gcs_test_item_2, "mime_type": "text/plain"}
f.write(json.dumps(data) + "\n")
print(gcs_input_uri)
! gsutil cat $gcs_input_uri
MIN_NODES = 1
MAX_NODES = 1
BATCH_MODEL = "mcdonalds_batch-" + TIMESTAMP
def create_batch_prediction_job(
    display_name,
    model_name,
    gcs_source_uri,
    gcs_destination_output_uri_prefix,
    parameters=None,
):
    """Create a Vertex AI batch prediction job and print its details.

    Args:
        display_name: display name for the batch prediction job.
        model_name: fully qualified model resource name to predict with.
        gcs_source_uri: Cloud Storage URI of the input instances file.
        gcs_destination_output_uri_prefix: Cloud Storage prefix for results.
        parameters: optional model parameters (dict), parsed into a Value.

    Returns:
        The BatchPredictionJob response object.
    """
    # Machine spec depends on whether a GPU was configured globally.
    if DEPLOY_GPU:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_type": DEPLOY_GPU,
            "accelerator_count": DEPLOY_NGPU,
        }
    else:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_count": 0,
        }
    batch_prediction_job = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model_name,
        "model_parameters": json_format.ParseDict(parameters, Value()),
        "input_config": {
            "instances_format": IN_FORMAT,
            "gcs_source": {"uris": [gcs_source_uri]},
        },
        "output_config": {
            "predictions_format": OUT_FORMAT,
            "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
        },
        "dedicated_resources": {
            "machine_spec": machine_spec,
            "starting_replica_count": MIN_NODES,
            "max_replica_count": MAX_NODES,
        },
    }
    response = clients["job"].create_batch_prediction_job(
        parent=PARENT, batch_prediction_job=batch_prediction_job
    )
    print("response")
    print(" name:", response.name)
    print(" display_name:", response.display_name)
    print(" model:", response.model)
    try:
        print(" generate_explanation:", response.generate_explanation)
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Not all data types expose explanations.
        pass
    print(" state:", response.state)
    print(" create_time:", response.create_time)
    print(" start_time:", response.start_time)
    print(" end_time:", response.end_time)
    print(" update_time:", response.update_time)
    print(" labels:", response.labels)
    return response
IN_FORMAT = "jsonl"
OUT_FORMAT = "jsonl" # [jsonl]
response = create_batch_prediction_job(
BATCH_MODEL, model_to_deploy_id, gcs_input_uri, BUCKET_NAME, None
)
# The full unique ID for the batch job
batch_job_id = response.name
# The short numeric ID for the batch job
batch_job_short_id = batch_job_id.split("/")[-1]
print(batch_job_id)
def get_batch_prediction_job(job_name, silent=False):
    """Fetch a batch prediction job; print details unless silent.

    Args:
        job_name: fully qualified batch prediction job resource name.
        silent: when True, return the output prefix and state without printing.

    Returns:
        Tuple of (gcs output_uri_prefix, job state).
    """
    response = clients["job"].get_batch_prediction_job(name=job_name)
    if silent:
        return response.output_config.gcs_destination.output_uri_prefix, response.state
    print("response")
    print(" name:", response.name)
    print(" display_name:", response.display_name)
    print(" model:", response.model)
    try:  # not all data types support explanations
        print(" generate_explanation:", response.generate_explanation)
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.
        pass
    print(" state:", response.state)
    print(" error:", response.error)
    gcs_destination = response.output_config.gcs_destination
    print(" gcs_destination")
    print(" output_uri_prefix:", gcs_destination.output_uri_prefix)
    return gcs_destination.output_uri_prefix, response.state
predictions, state = get_batch_prediction_job(batch_job_id)
def get_latest_predictions(gcs_out_dir):
Get the latest prediction subfolder using the timestamp in the subfolder name
folders = !gsutil ls $gcs_out_dir
latest = ""
for folder in folders:
subfolder = folder.split("/")[-2]
if subfolder.startswith("prediction-"):
if subfolder > latest:
latest = folder[:-1]
return latest
while True:
predictions, state = get_batch_prediction_job(batch_job_id, True)
if state != aip.JobState.JOB_STATE_SUCCEEDED:
print("The job has not completed:", state)
if state == aip.JobState.JOB_STATE_FAILED:
raise Exception("Batch Job Failed")
else:
folder = get_latest_predictions(predictions)
! gsutil ls $folder/prediction*.jsonl
! gsutil cat $folder/prediction*.jsonl
break
time.sleep(60)
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Vertex constants
Step12: AutoML constants
Step13: Hardware Accelerators
Step14: Container (Docker) image
Step15: Tutorial
Step16: Dataset
Step17: Now save the unique dataset identifier for the Dataset resource instance you created.
Step18: Data preparation
Step19: Quick peek at your data
Step20: Import data
Step21: Train the model
Step22: Construct the task requirements
Step23: Now save the unique identifier of the training pipeline you created.
Step24: Get information on a training pipeline
Step25: Deployment
Step26: Model information
Step27: Model deployment for batch prediction
Step28: Make the batch input file
Step29: Compute instance scaling
Step30: Make batch prediction request
Step31: Now get the unique identifier for the batch prediction job you created.
Step32: Get information on a batch prediction job
Step34: Get the predictions
Step35: Cleaning up
|
10,250
|
<ASSISTANT_TASK:>
Python Code:
person_height_ft = pd.Series([5.5,5.2,5.8,6.1,4.8],name='height',
index = ['person_a','person_b','person_c','person_d','person_e'],dtype=np.float64)
person_height_ft
person_height_ft.values
person_height_ft.index
person_height_ft['person_c']
person_height_ft[3]
person_height_ft[0:3]
person_height_mtr = (12* 2.54) * person_height_ft/100
person_height_mtr
# please note the index which is not same as height
person_weight_kg = pd.Series([70,55,73,68,66],name='weight',
index = ['person_b','person_d','person_e','person_c','person_a'],dtype=np.float64)
person_weight_kg
#Let's calculate BMI
bmi = person_weight_kg/person_height_mtr**2
bmi
#BMI of person_c
68/(1.76784*1.76784)
index = ['person_a','person_b','person_c','person_d','person_e']
df_person = pd.DataFrame({'height':[5.5,5.2,5.8,6.1,4.8],'weight':[66,70,68,55,73],
'gender':['male','male','female','male','female']}, index=index)
df_person.describe(include='all')
print(df_person.ndim)
print(df_person.shape)
print(df_person.dtypes)
print(df_person.columns)
print(df_person.index)
print(len(df_person))
df_person.info()
df_person.reset_index()
df_person.reset_index().set_index('gender')
another_index = ['a','b','c','d','e']
df1 = df_person.reset_index()
df1.index = another_index
df1
# Accessing the column data
height = df_person['height']
height
type(height)
# Accessing multiple columns
df_person[['height','weight']]
# Accessing one individual cell
df_person['height']['person_a']
# Accessing rows by index keys
df_person.loc['person_a']
# Accessing multiple rows using range
df_person['person_a':'person_c']
# Accessing using index position
print(df_person.iloc[0])
print("--------------")
print(df_person.iloc[0,2])
# Boolean indexing
# all persons with height > 5.2 feet
df_person[df_person.height > 5.2]
# Boolean indexing
# all persons with height > 5.2 feet and weight > 60kgs
df_person[(df_person.height > 5.2) & (df_person.weight > 60)]
df_person
# Let's add new column "age" to the DataFrame
df_person['age'] = pd.Series([30,28,26,19,42], index=index)
df_person
# Find all perons with age > 28
# Find females with age > 28
# Find max aged person
df_person[df_person.age==df_person.age.max()]
# Find max aged male person
# Find all persons having height > average height of the group
df_backup = df_person.copy()
df_person = df_backup.copy()
# Let's introduce few NaN values
df_person.loc['person_a','age'] = np.NaN
df_person.iloc[2,2] = np.NaN
df_person.loc['person_e','height'] = np.NaN
df_person.loc['person_f'] = np.NaN
df_person['married'] = np.NaN
df_person
# how takes 'all' or 'any'
# dropping all of the rows if all of the values are np.NaN
df_person.dropna(how='all')
# how takes 'all' or 'any'
# dropping all of the columns if all of the values are np.NaN
df_person.dropna(axis=1,how='all')
# Filling all of the NaN values with zero
df_person.fillna(0)
# replace NaN weight with average weight of the group
#df_person.weight.mean()
df_person['weight'].fillna(df_person.weight.mean())
df_person.fillna(method='ffill')
df_person.fillna(method='bfill')
df_person[df_person['weight'].notnull()]
df_person[df_person.notnull()['age']]
df_person = df_person.dropna(how='all')
df_person = df_person.dropna(how='all',axis=1)
df_person
df_person = df_person.fillna(method='ffill')
df_person
df_person['bmi'] = df_person['weight']/(((12* 2.54) * df_person['height']/100)**2)
df_person
df_person
df_person_grp = df_person.groupby('gender')
print(type(df_person_grp))
for group,data in df_person_grp:
print(group, data)
print("--------------------------------")
df_person_grp.mean()
df_person_grp.mean().plot(kind='bar')
df_person.apply(lambda x: x['gender'].upper()[0], axis=1)
df_person.columns
df_person[['weight','height']].apply(lambda x: x.dtype)
df_person.mean()
df_person['gender'].str.upper().str[0]
# settig seed ?
np.random.seed(5)
price = pd.Series(np.random.randint(100,high=150,size=150),
index=pd.date_range('2000-1-1', periods=150, freq='B'),name='col1')
price.head()
price.groupby(pd.TimeGrouper('1M')).max().plot(ylim=(146,150))
np.random.seed(5)
price1 = pd.Series(np.random.randint(200,high=250,size=500),
index=pd.date_range('2000-1-1', periods=500, freq='D'),name='col2')
#all_days = pd.date_range('2000-1-1', periods=500, freq='D')
df_time = pd.DataFrame({'col1':price,'col2':price1})
df_time.head()
len(df_time)
df_time.dtypes
df_time.plot(figsize=(16,8))
df_time.groupby(pd.TimeGrouper('1M')).mean().plot()
pd.Categorical?
pd.CategoricalIndex?
df
x = pd.Categorical(df_time['label'],ordered=True)
cat = pd.Series(df['label'], dtype=x)
cat
pd.merge?
pd.concat?
pd.Timestamp?
plt.plot([1,2,3,3.5,4.0],[1,2,3,3.2,3.8],
color='green', linestyle='dashed',
marker='o',markerfacecolor='blue',
markersize=8)
mylist = [0,1,0,1,2,3,4,0,1]
mycat = pd.Categorical(mylist,categories=[0,1])
mycat
mycat.set_categories([0,1,2,3,4])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A Series is like a fixed-size dict in that you can get and set values by index label
Step2: You can also use the index position to get and set the values
Step3: Vectorized operations and label alignment with Series
Step4: DataFrame
Step5: Accessing Data
Step6: Handling missing values
Step7: GroupBy function
Step8: Working with Text Data
Step9: Working with Dates and TimeSeries Data
|
10,251
|
<ASSISTANT_TASK:>
Python Code:
# Importing_new_features
# ..is easy. Features are collected
# in packages or modules. Just
import telnetlib # to use a
telnetlib.Telnet # client
# We can even import single classes
# from a module, like
from telnetlib import Telnet
# And read the module or class docs
help(telnetlib)
help(Telnet)
# you can print with the print() function
print("Hello world!")
# concatenate string with a + sign
# and using hex notation
print("Hello" + " " + "World\x21")
print("Ciao")
# prefixing a string with 'r' disables the
# interpretation of the string content
print('Hello' * 2 + r'World\x21')
# the chr() function returns the corresponding
# character of an integer. While \n and \t are
# just the usual notation for linefeed and tab
print(chr(72) + "ello\n\tWorld!")
# triple-quoting allows multi-line strings
# %s works like in the C printf() function
# but operates on strings
# ord() is just the inverse of chr()
print(The answer is
%s
% ord('*'))
# This is a comment, while
a = 1 # is an integer variable
b = 0x10 # is another integer in hex notation
# c = 011 # ...another one in C-style oct on python 2...
c = 0o11 # ...in python 2 and 3
# I can sum, multiply, and modulus
print(a + b, 5 % 2)
print(2 * c)
# variable_assignment
# I can assign more than one variable on the same line
a, b, c = 1, 2, 3
d, stringa_a, stringa_b = a + b, "pippo", "pluto"
# ...swap them...
(a, b) = (b, a)
# but if right-side values are not defined, I get an exception
e, f = c, e + d
# We should respect reserved words and functions, like print, ord...
print(("ord:\x20", ord))
ord = 4
ord('*') # ...ooops!
del ord # fix it up!
ord('*') # ...ooops!
## def formatting_numbers():
# bin() and hex() returns a string representation
# of a number
a, b1 = hex(10), bin(1)
# while the format() function can be more flexible
# 10 = 8ciphers + 2chars for the '0b' header
binary_with_leading_zeroes = format(1, '#010b')
# and reversible with
b1 == int(binary_with_leading_zeroes, base=2)
#def new_formatting():
# The new str.format function just replaces
# %s or %d with {}.
s_a = "is a string "
s_a += "that can {} extended".format("be")
# Further formatting is done using ":", eg.
# %.6s -> {:.6}
# %3.2d -> {:3.2}
s_a = "{} even with {:.6} formatting.\n".format(s_a, "positional")
# Alignment identifiers are simpler: < left , ^ center, > right
s_a = "Align {:>10}% python!".format(100)
print(s_a)
print("just prints a string")
# you can name variables to get
# a better formatting experience ;)
fmt_a = "{name:<.3} {nick:^.8} {sn:>30}"
print(fmt_a.format(name="-"*10, nick="*"*15, sn="-"*40))
print(fmt_a.format(name="Roberto", nick="ioggstream", sn="Polli"))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Python for System Administrator
Step2: Basic Arithmetic
Step3: Variable assignment
Step4: Formatting numbers
Step5: Formatting
Step6: Formatting with names
|
10,252
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
bank = pd.read_csv('bank/bank-full.csv',sep=";")
bank.head()
X = bank.iloc[:,:-1]
y = bank['y']
y = (y == 'yes')*1
X = np.array(X)
from sklearn.preprocessing import OneHotEncoder
bank = pd.get_dummies(bank,drop_first=True,sparse=True)
X = bank.iloc[:,:-1]
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
import itertools
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold, cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve, roc_curve,confusion_matrix, classification_report,precision_score
%matplotlib inline
kfold = KFold(n_splits=5)
maxd, maxleaf, scores = [], [], []
for i,j in itertools.product(range(2,10,2),[10,50,100,200,500]):
RFC=RandomForestClassifier(max_depth=i,max_leaf_nodes=j, min_samples_leaf=1,
min_samples_split=2,min_weight_fraction_leaf=0.0,oob_score=True)
score = RFC.fit(X,y).oob_score_
maxd.append(i); maxleaf.append(j);scores.append(score)
print('max_depth:',i,'max_leaf_nodes:',j,'score',score)
index=np.argmax(scores)
print('best:','max_depth:',maxd[index],'max_leaf_nodes:',maxleaf[index],'score',scores[index])
maxd, maxleaf, scores = [], [], []
for i,j in itertools.product(range(8,20,3),[500,1000,2000]):
RFC=RandomForestClassifier(max_depth=i,max_leaf_nodes=j, min_samples_leaf=1,
min_samples_split=2,min_weight_fraction_leaf=0.0,oob_score=True)
score = RFC.fit(X,y).oob_score_
maxd.append(i); maxleaf.append(j);scores.append(score)
print('max_depth:',i,'max_leaf_nodes:',j,'score',score)
index=np.argmax(scores)
print('best:','max_depth:',maxd[index],'max_leaf_nodes:',maxleaf[index],'score',scores[index])
minleaf, minsplit, scores = [], [], []
for i,j in itertools.product([1,10,30,50],[i/10.0 for i in range(1,10)]):
RFC=RandomForestClassifier(max_depth=11,max_leaf_nodes=500, min_samples_leaf=i,
min_samples_split=j,min_weight_fraction_leaf=0.0,oob_score=True)
score = RFC.fit(X,y).oob_score_
minleaf.append(i); minsplit.append(j);scores.append(score)
print('min_samples_leaf:',i,'min_samples_spl:',j,'score',"%.6f"%score)
index=np.argmax(scores)
print('best:','min_samples_leaf:',minleaf[index],'min_samples_spl:',minsplit[index],'score',scores[index])
weight, scores = [], []
for i in [i/10.0 for i in range(1,6)]:
RFC=RandomForestClassifier(max_depth=11,max_leaf_nodes=500, min_samples_leaf=10,
min_samples_split=0.1,min_weight_fraction_leaf=i,oob_score=True)
score = RFC.fit(X,y).oob_score_
weight.append(i);scores.append(score)
print('min_weight_fraction_leaf:',i,'score',score)
index=np.argmax(scores)
print('best:','min_weight_fraction_leaf:',weight[index],'score',scores[index])
from sklearn.model_selection import StratifiedShuffleSplit, GridSearchCVearchCV
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = [
{'C': C_range, 'kernel': ['linear']},
{'C': C_range, 'gamma': gamma_range , 'kernel': ['rbf']},
]
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.4f" % (grid.best_params_, grid.best_score_))
maxd, maxleaf, scores = [], [], []
for i,j in itertools.product(range(2,10,2),[10,50,100,200,500]):
DTC=DecisionTreeClassifier(max_depth=i,max_leaf_nodes=j, min_samples_leaf=1,
min_samples_split=2,min_weight_fraction_leaf=0.0)
score = cross_val_score(DTC,X,y,cv=kfold).mean()
maxd.append(i); maxleaf.append(j);scores.append(score)
print('max_depth:',i,'max_leaf_nodes:',j,'score',score)
index=np.argmax(scores)
print('best:','max_depth:',maxd[index],'max_leaf_nodes:',maxleaf[index],'score',scores[index])
minleaf, minsplit, scores = [], [], []
for i,j in itertools.product([1,10,30,50],[i/10.0 for i in range(1,10)]):
DTC=DecisionTreeClassifier(max_depth=2,max_leaf_nodes=10, min_samples_leaf=i,
min_samples_split=j,min_weight_fraction_leaf=0.0)
score = cross_val_score(DTC,X,y,cv=kfold).mean()
minleaf.append(i); minsplit.append(j);scores.append(score)
print('min_samples_leaf:',i,'min_samples_spl:',j,'score',"%.6f"%score)
index=np.argmax(scores)
print('best:','min_samples_leaf:',minleaf[index],'min_samples_spl:',minsplit[index],'score',scores[index],2)
DTC=DecisionTreeClassifier(max_depth=2,max_leaf_nodes=10, min_samples_leaf=1,
min_samples_split=0.9,min_weight_fraction_leaf=0.0)
nesti, lrate, scores = [], [], []
for i,j in itertools.product([10,50,100,1000],np.logspace(-6,1,7)):
ABC = AdaBoostClassifier(base_estimator = DTC,n_estimators=i,learning_rate=j)
score = cross_val_score(ABC,X,y,cv=kfold).mean()
nesti.append(i); lrate.append(j);scores.append(score)
print('n_estimate:',i,'learning_rate:',"%.6f"%j,'score',"%.6f"%score)
index=np.argmax(scores)
print('best:','n_estimate:',minleaf[index],'learning_rate:',minsplit[index],'score',scores[index],2)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, random_state=0)
RFC=RandomForestClassifier(max_depth=11,max_leaf_nodes=500, min_samples_leaf=10,
min_weight_fraction_leaf=0.1,oob_score=True)
RFC.fit(X_train,y_train)
prob = RFC.predict_proba(X_test)
rcffpr, rcftpr, rcfthr = roc_curve(y_test, prob[:,1])
rcfpre, rcfrec, rcfthresh = precision_recall_curve(y_test, prob[:,1])
svc = SVC(probability=True, C=0.01,kernel='linear')
svc.fit(X_train,y_train)
pred = svc.predict_proba(X_test)
svcfpr, svctpr, svcthr = roc_curve(y_test, pred[:,1])
svcpre, svcrec, svcthresh = precision_recall_curve(y_test, pred[:,1])
DTC=DecisionTreeClassifier(max_depth=2,max_leaf_nodes=10, min_samples_leaf=1,
min_samples_split=0.9,min_weight_fraction_leaf=0.0)
ABC = AdaBoostClassifier(base_estimator = DTC,n_estimators=30,learning_rate=0.1)
ABC.fit(X_train,y_train)
pred = ABC.predict_proba(X_test)
abcfpr, abctpr, abcthr = roc_curve(y_test, pred[:,1])
abcpre, abcrec, abcthresh = precision_recall_curve(y_test, pred[:,1])
plt.figure()
lw = 2
plt.plot(rcffpr,rcftpr,lw=lw, label='Random Forest')
plt.plot(abcfpr,abctpr,lw=lw, label='Adaboost')
plt.plot(svcfpr,svctpr,lw=lw, label='Linear SVM')
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic for three methods')
plt.legend(loc="lower right")
plt.show()
plt.figure()
lw = 2
plt.plot(rcfrec,rcfpre,lw=lw, label='Random Forest')
plt.plot(abcrec,abcpre,lw=lw, label='Adaboost')
plt.plot(svcrec,svcpre,lw=lw, label='Linear SVM')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision Recall curve for three methods')
plt.legend(loc=1)
plt.show()
svcpred = svc.predict(X_test)
rfcpred = RFC.predict(X_test)
abcpred = ABC.predict(X_test)
def plot_confusion_matrix(cm, classes, model, title='Confusion matrix (Normalized)',
                          cmap=plt.cm.Blues):
    """Render a 2x2 confusion matrix as a colored heatmap.

    Args:
        cm: 2x2 confusion-matrix array (rows = predicted, cols = true here,
            since callers pass the transpose).
        classes: two tick labels, e.g. ['NO', 'YES'].
        model: model name used in the plot title.
        title: unused in the current rendering (kept for interface
            compatibility; the title is built from `model`).
        cmap: matplotlib colormap for the heatmap.
    """
    # Bug fix: honor the `cmap` argument instead of hardcoding plt.cm.Blues.
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title('confusion matrix of {}'.format(model))
    plt.colorbar()
    plt.xticks(np.arange(2), classes)
    plt.yticks(np.arange(2), classes)
    plt.tight_layout()
    plt.xlabel('True label',rotation='horizontal', ha='right')
    plt.ylabel('Predicted label')
    plt.show()
for i,j in zip(['svc','random forest','adaboost'],[svcpred,rfcpred,abcpred]):
cm = confusion_matrix(y_test, j)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plot_confusion_matrix(cm_normalized.T, ['NO','YES'], i)
cm_df = pd.DataFrame(cm.T, index=['NO','YES'], columns=['NO','YES'])
cm_df.index.name = 'Predicted'
cm_df.columns.name = 'True'
print(cm_df)
print('precision:',sum(y_test==j)/float(len(y_test)))
import theano
from theano import *
import theano.tensor as T
from sklearn.preprocessing import scale
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, random_state=0)
X_train=scale(X_train)
X_test=scale(X_test)
y_train = y_train.values
p = 42
H = 3
x = T.vector('x')
W1 = theano.shared(value = np.random.randn(p*H).reshape((H,p)), name= 'W1')
w2 = theano.shared(value = np.random.randn(H), name= 'w2')
u1 = T.dot(W1,x)
h = T.nnet.relu(u1)
u2 = T.dot(h,w2)
y = T.scalar('y')
prob = T.nnet.sigmoid(u2)
R = - y * T.log(prob) - (1 - y) * T.log(1 - prob)
w2g = T.grad(R,w2)
W1g = T.grad(R,W1)
learn_rate = .05
W_updates = [(W1, W1 - learn_rate * W1g),
(w2, w2 - learn_rate * w2g)]
grad_step = theano.function([x,y],R,updates=W_updates)
for num in range(10):
n = X_train.shape[0]
for i in range(n):
grad_step(X_train[i,:],y_train[i])
W1.get_value().shape,w2.get_value()
ypred = np.array([prob.eval({x: X}) > .5 for X in X_test ])
print('precision:',sum(y_test==ypred)/float(len(y_test)))
def encode_labels(labels, max_index):
    """Encode the labels into binary (one-hot) vectors.

    Args:
        labels: 1-D integer array of class labels in [0, max_index].
        max_index: largest possible label; output has max_index + 1 columns.

    Returns:
        Float array of shape (len(labels), max_index + 1) with a single 1
        per row at the label's index.
    """
    # Allocate the output labels, all zeros.
    encoded = np.zeros((labels.shape[0], max_index + 1))
    # Vectorized one-hot assignment (replaces the Python-2 `xrange` loop,
    # which is a NameError on Python 3).
    encoded[np.arange(labels.shape[0]), labels] = 1
    return encoded
labeled = encode_labels(y_train, 1)
W1_shape = (3, 42)
b1_shape = 3
W2_shape = (2, 3)
b2_shape = 2
W1 = shared(np.random.random(W1_shape) - 0.5, name="W1")
b1 = shared(np.random.random(b1_shape) - 0.5, name="b1")
W2 = shared(np.random.random(W2_shape) - 0.5, name="W2")
b2 = shared(np.random.random(b2_shape) - 0.5, name="b2")
x = T.dmatrix("x") # N x 784
labels = T.dmatrix("labels") # N x 10
hidden = T.nnet.sigmoid(x.dot(W1.transpose()) + b1)
output = T.nnet.softmax(hidden.dot(W2.transpose()) + b2)
prediction = T.argmax(output, axis=1)
reg_lambda = 0.0001
regularization = reg_lambda * ((W1 * W1).sum() + (W2 * W2).sum() + (b1 * b1).sum() + (b2 * b2).sum())
cost = T.nnet.binary_crossentropy(output, labels).mean() + regularization
#sigmoid
compute_prediction = function([x], prediction)
alpha = T.dscalar("alpha")
weights = [W1, W2, b1, b2]
updates = [(w, w - alpha * grad(cost, w)) for w in weights]
train_nn = function([x, labels, alpha],
cost,
updates=updates)
alpha = 10.0
costs = []
while True:
costs.append(float(train_nn(X_train, labeled, alpha)))
if len(costs) % 10 == 0:
print 'Epoch', len(costs), 'with cost', costs[-1], 'and alpha', alpha
if len(costs) > 2 and costs[-2] - costs[-1] < 0.0001:
if alpha < 0.2:
break
else:
alpha = alpha / 1.5
prediction = compute_prediction(X_test)
print('precision:',sum(y_test==prediction)/float(len(y_test)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 2.1 (30 pts) Predict y from X using kernel SVMs, random forests, and adaboost (see the sklearn.ensembles package). Tune the random forest using the out-of-bag error. Tune everything else using cross-validation, and assess the models using a separate test set with ROC, PR, confusion matrices. Write a paragraph about the relative performances of the algorithms and any observations that you've made.
Step2: tune random forest
Step3: the answer show that for both value, the higher the node and depth, the better the performance, in order to find the optimal one, I will try to set higher value.
Step4: the answer show that the best max_depth is 11, and best max_leaf_nodes are 500.
Step5: After finish tuning min_samples_leaf and min_samples_split, there only one weight left to be tune
Step6: Accordingly we have tuned all the parameters for random forest
Step7: Tune adaboost
Step8: so the best max_depth is 2 and best max_leaf_nodes in 10, then we can tune min_samples_leaf and min_samples_spl
Step9: So we got that the best parameter for min_sample_leaf is 1, and min_sample_split is 0.9. Then we can train the adaboost parameter n_estimator and learning rate.
Step10: so we can find that the best number of estimate is 30, and learning rate is 0.1.
Step11: Analysis tuned model with confusion matrix
Step12: Exercise 2.2 (Bonus
Step14: method 2
|
10,253
|
<ASSISTANT_TASK:>
Python Code:
# corpus ficticio con tres documentos de la misma longitud
# y sin repeticiones de términos dentro del mismo documento
# cada doc es una lista de palabras
d1 = 'los angeles times'.split()
d2 = 'new york times'.split()
d3 = 'new york post'.split()
# nuestro corpus D es una lista de documentos
D = [d1, d2, d3]
print(D)
# calculamos los valores de tf para cada término t y cada docID
# como un diccionario de diccionarios, tal que tf[t][docID] = valor
tf = {}
# iteramos sobre los documentos del corpus
for d in D:
# iteramos sobre las palabras del documento
for t in d:
# si no he visto el término t antes, creo la clave en tf
if t not in tf:
tf[t] = {}
# ¿cuál es el doc que estoy procesando?
docID = D.index(d)
# asigno el valor de tf para el término t y el documento actual
# (número de veces que aparece t dividido entre el número de palabras de d)
tf[t][docID] = d.count(t) / len(d)
print(tf)
# calculamos los valores de tf para cada término t y cada docID
# como un diccionario de listas, tal que tf[t][i] = valor
tf = {}
# primera iteración, creo el esqueleto del diccionario de listas
# iteramos sobre los documentos del corpus
for d in D:
# iteramos sobre las palabras del documento
for t in d:
# relleno todas las casillas con 0
tf[t] = [0] * len(D)
print('tf solo contiene 0s')
print(tf)
# segunda iteración, reasigno los valores sólo en aquellas posiciones donde sea necesario
# iteramos sobre los documentos del corpus
for d in D:
# iteramos sobre las palabras del documento
for t in d:
docID = D.index(d)
tf[t][docID] = d.count(t) / len(d)
print('\ntf contiene los valores de tf que corresponden')
print(tf)
# calculamos los valores de df para cada término t
df = {}
# iteramos sobre los término del vocabulario
for t in tf:
# reiniciamos los valores a 0
df[t] = 0
for d in D:
# para cada documento d que contenga a t, sumamos +1 al df correspondiente
if t in d:
df[t] += 1
print(df)
import math
# calculamos los valores de idf para cada término t
idf = {}
# iteramos sobre los término del vocabulario
for t in tf:
idf[t] = math.log(len(D) / df[t])
print(idf)
# calculamos los valores de tf.idf para cada término t y cada docID
# como un diccionario de listas, tal que tfidf[t][i] = valor
tfidf = {}
# iteramos sobre los términos del vocabulario
for t in tf:
tfidf[t] = [] # inicializamos con una lista vacía
# iteramos sobre los valores de tf del término t
for d in tf[t]:
# añadimos el nuevo valor multiplicando tf * idf
tfidf[t].append( d * idf[t])
print(tfidf)
def calcula_tf(corpus):
    """Compute log-normalized tf values for every term of a corpus.

    Parameters
    ----------
    corpus : list of list of str
        Each document is a list of word tokens.

    Returns
    -------
    dict of list
        tf[t][docID] = 1 + log(count(t, d) / len(d)) when t occurs in
        document docID, and log(1e-8) (a "almost log(0)" floor) otherwise.
    """
    import math
    tf = {}
    # First pass: build the skeleton dict-of-lists, filling every slot
    # with the near-log(0) floor used for absent terms.
    # BUGFIX: the original used len(D) — the notebook-global corpus —
    # instead of len(corpus), so the function only worked by accident.
    for d in corpus:
        for t in d:
            tf[t] = [math.log(0.00000001)] * len(corpus)
    # Second pass: overwrite only the slots where the term actually occurs.
    # enumerate() gives the document id directly; the original's
    # corpus.index(d) was O(n) per document and wrong for duplicate docs.
    for docID, d in enumerate(corpus):
        for t in d:
            tf[t][docID] = 1 + math.log(d.count(t) / len(d))  # log normalization
    return tf
def calcula_idf(vocabulario, corpus):
    """Compute idf values for a vocabulary over a corpus.

    Parameters
    ----------
    vocabulario : iterable of str
        Terms to score. Every term must occur in at least one document,
        otherwise a ZeroDivisionError is raised (df = 0).
    corpus : list of list of str
        Each document is a list of word tokens.

    Returns
    -------
    dict
        idf[t] = log(N / df[t]) where N is the number of documents and
        df[t] the number of documents containing t.
    """
    import math
    # Document frequency: in how many documents does each term appear?
    df = {}
    for t in vocabulario:
        # reset, then count one per document that contains t
        df[t] = 0
        for d in corpus:
            if t in d:
                df[t] += 1
    # Inverse document frequency from the df counts.
    idf = {}
    for t in vocabulario:
        idf[t] = math.log(len(corpus) / df[t])
    return idf
def calcula_tfidf(tf, idf):
    """Combine tf and idf scores into tf.idf values.

    Parameters
    ----------
    tf : dict of list
        tf[t][i] = tf value of term t in document i.
    idf : dict
        idf[t] = idf value of term t. Must contain every key of ``tf``.

    Returns
    -------
    dict of list
        tfidf[t][i] = tf[t][i] * idf[t].
    """
    tfidf = {}
    for t in tf:
        # scale every per-document tf value by the term's idf
        tfidf[t] = [v * idf[t] for v in tf[t]]
    return tfidf
# construyo un nuevo corpus como una lista de docs, donde cada doc es una lista de palabras
# https://www.goodreads.com/author/quotes/272231.Eminem
eminem_quotes = Love when spelled backwards and read phonetically reads evil|
Don’t do drugs don’t have unprotected sex don’t be violent Leave that to me|
If you have enemies good that means you stood up for something|
Somewhere deep down there's a decent man in me he just can't be found|
I can't tell you what it really is I can only tell you what it feels like|
Behind every sucessful person lies a pack of haters|
Sometimes I'm real cool but sometimes I could be a real asshole I think everyone is like that|
Love is just a word but you bring it definition|
Damn How much damage can you do with a pen|
Don't let them say you ain't beautiful They can all get fucked just stay true to you|
I come from Detroit where it's rough and I'm not a smooth talker|
If there's not drama and negativity in my life all my songs will be really wack and boring or something|
I always wished for this but it's almost turning into more of a nightmare than a dream|
Dealing with backstabbers there was one thing I learned They're only powerful when you got your back turned|
When I say I'll murder my baby's mother maybe I wanted to but I didn't Anybody who takes it literally is 10 times sicker than I am|
When you're a little kid you don't see color and the fact that my friends were black never crossed my mind It never became an issue until I was a teenager and started trying to rap|
It sometimes feels like a strange movie you know it’s all so weird that sometimes I wonder if it is really happening|
Personally I just think rap music is the best thing out there period If you look at my deck in my car radio you're always going to find a hip-hop tape; that's all I buy that's all I live that's all I listen to that's all I love|
I'm just a little bit sicker then the average individual I think|
Imma be what I set out to be without a doubt undoubtedly|
The truth is you don't know what is going to happen tomorrow Life is a crazy ride and nothing is guaranteed|
You'd have to walk a thousand miles in my shoes just to see what its like to be me|
Don't let them tell you ain't beautiful|
I act like shit don’t phase me inside it drives me crazy my insecurities could eat me alive|
But music is reflection of self we just explain it and then we get our checks in the mail|
Sometimes I feel like rap music is almost the key to stopping racism|
I might talk about killing people but that doesn't mean I do it|
Before I was famous when I was just working in Gilbert's Lodge everything was moving in slow motion|.split('|\n')
# nuestro corpus D es una lista de documentos
# cada doc es una lista de palabras
D = []
for quote in eminem_quotes:
D.append( quote.lower().split() )
print('Calculando los valores tf... ', end='')
tf = calcula_tf(D)
print('¡ok!')
print('Calculando los valores idf... ', end='')
idf = calcula_idf(tf.keys(), D)
print('¡ok!')
print('Calculando los valores tf.idf... ', end='')
tfidf = calcula_tfidf(tf, idf)
print('¡ok!\n\n')
# imprimimos los valores de algunos términos
print('love', tfidf['love'], '\n')
print('the', tfidf['the'], '\n')
print('backwards', tfidf['backwards'], '\n')
print('killing', tfidf['killing'], '\n')
print('Los valores tf.idf para cada término del vocabulario son:')
for t in tfidf:
print(t, '=>')
print(tfidf[t], '\n\n')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: tf (term frequency)
Step2: La aproximación anterior, tal cual está programada, arma un diccionario de diccionarios pero tiene varias desventajas
Step3: En el caso de este corpus ficticio, todos los valores de tf son, o bien 0 (si el término no aparece en el documento), o bien $1/3$ si aparece una sola vez.
Step4: Los valores de df son números enteros
Step5: Fíjate cómo interpretamos estos valores. Los términos que aparecen en un solo documento, tienen un idf más alto, son mejores descriptores del contenido de esos documentos, tienen más poder para discriminar temáticas. Los términos que se distribuyen en varios documentos tienen un idf más bajo, son peores descriptores.
Step10: Repetimos el experimento con más documentos
Step11: Ahora sí lo probamos
|
10,254
|
<ASSISTANT_TASK:>
Python Code:
m = folium.Map([45, 0], zoom_start=4)
folium.Marker([45, -30], popup="inline implicit popup").add_to(m)
folium.CircleMarker(
location=[45, -10],
radius=25,
popup=folium.Popup("inline explicit Popup")
).add_to(m)
ls = folium.PolyLine(
locations=[[43, 7], [43, 13], [47, 13], [47, 7], [43, 7]],
color='red'
)
ls.add_child(folium.Popup("outline Popup on Polyline"))
ls.add_to(m)
gj = folium.GeoJson(
data={
"type": "Polygon",
"coordinates": [[[27, 43], [33, 43], [33, 47], [27, 47]]]
}
)
gj.add_child(folium.Popup("outline Popup on GeoJSON"))
gj.add_to(m)
m.save(os.path.join('results', 'simple_popups.html'))
m
import json
import numpy as np
import vincent
scatter_points = {
'x': np.random.uniform(size=(100,)),
'y': np.random.uniform(size=(100,)),
}
# Let's create the vincent chart.
scatter_chart = vincent.Scatter(scatter_points,
iter_idx='x',
width=600,
height=300)
# Let's convert it to JSON.
scatter_json = scatter_chart.to_json()
# Let's convert it to dict.
scatter_dict = json.loads(scatter_json)
m = folium.Map([43, -100], zoom_start=4)
# Let's create a Vega popup based on scatter_chart.
popup = folium.Popup(max_width=800)
folium.Vega(scatter_chart, height=350, width=650).add_to(popup)
folium.Marker([30, -120], popup=popup).add_to(m)
# Let's create a Vega popup based on scatter_json.
popup = folium.Popup(max_width=800)
folium.Vega(scatter_json, height=350, width=650).add_to(popup)
folium.Marker([30, -100], popup=popup).add_to(m)
# Let's create a Vega popup based on scatter_dict.
popup = folium.Popup(max_width=800)
folium.Vega(scatter_dict, height=350, width=650).add_to(popup)
folium.Marker([30, -80], popup=popup).add_to(m)
m.save(os.path.join('results', 'vega_popups.html'))
m
import branca
m = folium.Map([43, -100], zoom_start=4)
html =
<h1> This is a big popup</h1><br>
With a few lines of code...
<p>
<code>
from numpy import *<br>
exp(-2*pi)
</code>
</p>
iframe = branca.element.IFrame(html=html, width=500, height=300)
popup = folium.Popup(iframe, max_width=2650)
folium.Marker([30, -100], popup=popup).add_to(m)
m.save(os.path.join('results', 'html_popups.html'))
m
# Let's create a Figure, with a map inside.
f = branca.element.Figure()
folium.Map([-25, 150], zoom_start=3).add_to(f)
# Let's put the figure into an IFrame.
iframe = branca.element.IFrame(width=500, height=300)
f.add_to(iframe)
# Let's put the IFrame in a Popup
popup = folium.Popup(iframe, max_width=2650)
# Let's create another map.
m = folium.Map([43, -100], zoom_start=4)
# Let's put the Popup on a marker, in the second map.
folium.Marker([30, -100], popup=popup).add_to(m)
# We get a map in a Popup. Not really useful, but powerful.
m.save(os.path.join('results', 'map_popups.html'))
m
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Vega Popup
Step3: Fancy HTML popup
Step4: Note that you can put another Figure into an IFrame; this should let you do strange things...
|
10,255
|
<ASSISTANT_TASK:>
Python Code:
# We could tediously build a list …
# filenames = ['/data/Houston/realtime-tracer/LYLOUT_200524_210000_0600.dat.gz',]
# Instead, let's read a couple hours at the same time.
import sys, glob
filenames = glob.glob('/data/Houston/130619/LYLOUT_130619_2[0-1]*.dat.gz')
for filename in filenames:
print(filename)
import glob
import numpy as np
import datetime
import xarray as xr
import pyproj as proj4
from pyxlma.lmalib.io import read as lma_read
lma_data, starttime = lma_read.dataset(filenames)
# Should match what we expect from the filenames
print(starttime)
print(type(starttime))
print(lma_data)
print(type(lma_data.event_longitude))
lma_data.event_longitude
print(lma_data.event_longitude.attrs['standard_name'])
%matplotlib widget
import matplotlib.pyplot as plt
lma_data.plot.scatter?
fig, axes = plt.subplots(1,1,figsize=(10,10))
count_subset = {'number_of_events':slice(0,10000)}
art = lma_data[count_subset].plot.scatter('event_longitude', 'event_latitude', ax=axes,
s=4, marker='s', #hue='event_time',
)
fig, axes = plt.subplots(1,1,figsize=(10,10))
count_subset = {'number_of_events':slice(0,10000)}
station_filter = (lma_data.event_stations >= 6)
chi_filter = (lma_data.event_chi2 <= 1.0)
filter_subset = {'number_of_events':(chi_filter & station_filter)}
# note that we first filter on all the data select 10000 points, and then on that dataset we further filter
art = lma_data[filter_subset][count_subset].plot.scatter('event_longitude', 'event_latitude', ax=axes,
s=4, marker='s', #hue='event_time',
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Investigating the pyxlma data structure
Step2: lma_data is an xarray object. If we print it, we see that it looks much like a NetCDF file, with dimensions and variables that allow us to store whole arrays of data and give them names. xarray is the best way to look at NetCDF data in Python.
Step3: There are a few things to notice above.
Step4: Notice that this xarray DataArray variable is not only the data values, but some other metadata, such as the units and the standard variable name from the Climate and Forecast Metadata Conventions. We can access those attributes if we want them
Step5: Simple plotting and filtering
Step6: We have lots of data in the file; let's grab the first 10000 points to make the plotting faster.
Step7: Looks pretty noisy. Let's try again, but filter to lower chi2 and greater event_contributing_stations.
|
10,256
|
<ASSISTANT_TASK:>
Python Code:
import os
import glob
import itertools
import nestly
%load_ext rpy2.ipython
%load_ext pushnote
%%R
library(ggplot2)
library(dplyr)
library(tidyr)
library(gridExtra)
## min G+C cutoff
min_GC = 13.5
## max G+C cutoff
max_GC = 80
## max G+C shift
max_13C_shift_in_BD = 0.036
min_BD = min_GC/100.0 * 0.098 + 1.66
max_BD = max_GC/100.0 * 0.098 + 1.66
max_BD = max_BD + max_13C_shift_in_BD
print 'Min BD: {}'.format(min_BD)
print 'Max BD: {}'.format(max_BD)
# paths
workDir = '/home/nick/notebook/SIPSim/dev/bac_genome1147/'
buildDir = os.path.join(workDir, 'atomIncorp_taxaIncorp_MW-HR-SIP_preSpar')
dataDir = os.path.join(workDir, 'atomIncorp_taxaIncorp')
if not os.path.isdir(buildDir):
os.makedirs(buildDir)
%cd $buildDir
# making an experimental design file for qSIP
x = range(1,7)
y = ['control', 'treatment']
expDesignFile = os.path.join(buildDir, 'qSIP_exp_design.txt')
with open(expDesignFile, 'wb') as outFH:
for i,z in itertools.izip(x,itertools.cycle(y)):
line = '\t'.join([str(i),z])
outFH.write(line + '\n')
!head $expDesignFile
# building tree structure
nest = nestly.Nest()
# varying params
nest.add('percIncorp', [0, 15, 25, 50, 100])
nest.add('percTaxa', [1, 5, 10, 25, 50])
nest.add('rep', range(1,11))
## set params
nest.add('abs', ['1e9'], create_dir=False)
nest.add('np', [10], create_dir=False)
nest.add('Monte_rep', [100000], create_dir=False)
nest.add('subsample_dist', ['lognormal'], create_dir=False)
nest.add('subsample_mean', [9.432], create_dir=False)
nest.add('subsample_scale', [0.5], create_dir=False)
nest.add('subsample_min', [10000], create_dir=False)
nest.add('subsample_max', [30000], create_dir=False)
nest.add('min_BD', [min_BD], create_dir=False)
nest.add('max_BD', [max_BD], create_dir=False)
nest.add('DBL_scaling', [0.5], create_dir=False)
nest.add('bandwidth', [0.8], create_dir=False)
nest.add('heavy_BD_min', [1.71], create_dir=False)
nest.add('heavy_BD_max', [1.75], create_dir=False)
nest.add('topTaxaToPlot', [100], create_dir=False)
nest.add('padj', [0.1], create_dir=False)
nest.add('log2', [0.25], create_dir=False)
nest.add('occurs', ['0.0,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5'], create_dir=False)
### input/output files
nest.add('buildDir', [buildDir], create_dir=False)
nest.add('exp_design', [expDesignFile], create_dir=False)
# building directory tree
nest.build(buildDir)
# bash file to run
bashFile = os.path.join(buildDir, 'SIPSimRun.sh')
files = !find . -name "*.json"
dirs = [os.path.split(x)[0] for x in files]
srcFiles = ['OTU_abs1e9_PCR_sub_w.txt', 'OTU_abs1e9_PCR_sub_meta.txt', 'BD-shift_stats.txt']
for d in dirs:
for f in srcFiles:
f1 = os.path.join(dataDir, d, f)
f2 = os.path.join(buildDir, d, f)
cmd = 'cp -f {} {}'.format(f1, f2)
!$cmd
bashFileTmp = os.path.splitext(bashFile)[0] + '_HRSIP_multi.sh'
bashFileTmp
%%writefile $bashFileTmp
#!/bin/bash
# phyloseq
## making phyloseq object from OTU table
SIPSimR phyloseq_make \
OTU_abs{abs}_PCR_sub_w.txt \
-s OTU_abs{abs}_PCR_sub_meta.txt \
> OTU_abs{abs}_PCR_sub.physeq
## HR SIP pipeline
SIPSimR phyloseq_DESeq2 \
--log2 {log2} \
--hypo greater \
--cont 1,3,5 \
--treat 2,4,6 \
--occur_all {occurs} \
-w 1.71-1.75 \
--all OTU_abs1e9_PCR_sub_MW1_all.txt \
OTU_abs{abs}_PCR_sub.physeq \
> OTU_abs1e9_PCR_sub_MW1_DS2.txt
SIPSimR phyloseq_DESeq2 \
--log2 {log2} \
--hypo greater \
--cont 1,3,5 \
--treat 2,4,6 \
--occur_all {occurs} \
-w 1.71-1.78 \
--all OTU_abs1e9_PCR_sub_MW2_all.txt \
OTU_abs{abs}_PCR_sub.physeq \
> OTU_abs1e9_PCR_sub_MW2_DS2.txt
SIPSimR phyloseq_DESeq2 \
--log2 {log2} \
--hypo greater \
--cont 1,3,5 \
--treat 2,4,6 \
--occur_all {occurs} \
-w 1.69-1.74,1.73-1.78 \
--all OTU_abs1e9_PCR_sub_MW3_all.txt \
OTU_abs{abs}_PCR_sub.physeq \
> OTU_abs1e9_PCR_sub_MW3_DS2.txt
SIPSimR phyloseq_DESeq2 \
--log2 {log2} \
--hypo greater \
--cont 1,3,5 \
--treat 2,4,6 \
--occur_all {occurs} \
-w 1.70-1.73,1.72-1.75,1.74-1.77 \
--all OTU_abs1e9_PCR_sub_MW4_all.txt \
OTU_abs{abs}_PCR_sub.physeq \
> OTU_abs1e9_PCR_sub_MW4_DS2.txt
SIPSimR phyloseq_DESeq2 \
--log2 {log2} \
--hypo greater \
--cont 1,3,5 \
--treat 2,4,6 \
--occur_all {occurs} \
-w 1.69-1.73,1.72-1.76,1.75-1.79 \
--all OTU_abs1e9_PCR_sub_MW5_all.txt \
OTU_abs{abs}_PCR_sub.physeq \
> OTU_abs1e9_PCR_sub_MW5_DS2.txt
!chmod 777 $bashFileTmp
!cd $workDir; \
nestrun --template-file $bashFileTmp -d $buildDir --log-file HR-SIP_multi.log -j 10
%pushnote preSpar MW-HR-SIP complete
bashFileTmp = os.path.splitext(bashFile)[0] + '_cMtx.sh'
bashFileTmp
%%writefile $bashFileTmp
#!/bin/bash
# HR-SIP multiple 'heavy' BD windows
SIPSimR DESeq2_confuseMtx \
--libs 2,4,6 \
--padj {padj} \
-o DESeq2_MW1-cMtx \
BD-shift_stats.txt \
OTU_abs1e9_PCR_sub_MW1_DS2.txt
SIPSimR DESeq2_confuseMtx \
--libs 2,4,6 \
--padj {padj} \
-o DESeq2_MW2-cMtx \
BD-shift_stats.txt \
OTU_abs1e9_PCR_sub_MW2_DS2.txt
SIPSimR DESeq2_confuseMtx \
--libs 2,4,6 \
--padj {padj} \
-o DESeq2_MW3-cMtx \
BD-shift_stats.txt \
OTU_abs1e9_PCR_sub_MW3_DS2.txt
SIPSimR DESeq2_confuseMtx \
--libs 2,4,6 \
--padj {padj} \
-o DESeq2_MW4-cMtx \
BD-shift_stats.txt \
OTU_abs1e9_PCR_sub_MW4_DS2.txt
SIPSimR DESeq2_confuseMtx \
--libs 2,4,6 \
--padj {padj} \
-o DESeq2_MW5-cMtx \
BD-shift_stats.txt \
OTU_abs1e9_PCR_sub_MW5_DS2.txt
!chmod 777 $bashFileTmp
!cd $workDir; \
nestrun --template-file $bashFileTmp -d $buildDir --log-file cMtx.log -j 10
def agg_cMtx(prefix):
    """Aggregate nestly confusion-matrix result tables for one file prefix.

    Runs ``nestagg delim`` over the nestly directory tree to concatenate the
    per-run '<prefix>-cMtx_overall.txt' and '<prefix>-cMtx_byClass.txt'
    tables (keyed by percIncorp, percTaxa, rep) into single files.

    NOTE(review): this uses IPython shell magic (!nestagg) and interpolates
    notebook-level variables ($buildDir, $x), so it only runs inside a
    Jupyter/IPython session where buildDir is defined — not in plain Python.
    """
    # all data (disabled: the full per-row table is not aggregated here)
    #!nestagg delim \
    #    -d $buildDir \
    #    -k percIncorp,percTaxa,rep \
    #    -o $prefix-cMtx_data.txt \
    #    --tab \
    #    $prefix-cMtx_data.txt
    # overall summary table
    x = prefix + '-cMtx_overall.txt'
    !nestagg delim \
        -d $buildDir \
        -k percIncorp,percTaxa,rep \
        -o $x \
        --tab \
        $x
    # per-class summary table
    x = prefix + '-cMtx_byClass.txt'
    !nestagg delim \
        -d $buildDir \
        -k percIncorp,percTaxa,rep \
        -o $x \
        --tab \
        $x
agg_cMtx('DESeq2_MW1')
agg_cMtx('DESeq2_MW2')
agg_cMtx('DESeq2_MW3')
agg_cMtx('DESeq2_MW4')
agg_cMtx('DESeq2_MW5')
%pushnote preSpar MW-HR-SIP run complete!
F = os.path.join(buildDir, '*-cMtx_byClass.txt')
files = glob.glob(F)
files
%%R -i files
df_byClass = list()
for (f in files){
ff = strsplit(f, '/') %>% unlist
fff = ff[length(ff)]
df_byClass[[fff]] = read.delim(f, sep='\t')
}
df_byClass = do.call(rbind, df_byClass)
df_byClass$file = gsub('\\.[0-9]+$', '', rownames(df_byClass))
df_byClass$method = gsub('-cMtx.+', '', df_byClass$file)
rownames(df_byClass) = 1:nrow(df_byClass)
df_byClass %>% head(n=3)
%%R
# renaming method
rename = data.frame(method = c('DESeq2_MW1', 'DESeq2_MW2', 'DESeq2_MW3', 'DESeq2_MW4', 'DESeq2_MW4'),
method_new = c('1.71-1.75',
'1.71-1.78',
'1.69-1.74,\n1.73-1.78',
'1.70-1.73,\n1.72-1.75,\n1.74-1.77',
'1.69-1.73,\n1.72-1.76,\n1.75-1.79'))
df_byClass = inner_join(df_byClass, rename, c('method'='method')) %>%
select(-method) %>%
rename('method' = method_new)
df_byClass$method = factor(df_byClass$method, levels=rename$method_new %>% as.vector)
df_byClass %>% head(n=3)
%%R -w 800 -h 550
# summarize by SIPSim rep & library rep
df_byClass.s = df_byClass %>%
group_by(method, percIncorp, percTaxa, variables) %>%
summarize(mean_value = mean(values),
sd_value = sd(values))
# plotting
ggplot(df_byClass.s, aes(variables, mean_value, color=method,
ymin=mean_value-sd_value,
ymax=mean_value+sd_value)) +
geom_pointrange(alpha=0.8, size=0.2) +
labs(y='Value') +
facet_grid(percTaxa ~ percIncorp) +
theme_bw() +
theme(
text = element_text(size=16),
axis.title.x = element_blank(),
axis.text.x = element_text(angle=45, hjust=1)
)
%%R -w 850 -h 600
# summarize by SIPSim rep & library rep
vars = c('Balanced Accuracy', 'Sensitivity', 'Specificity')
df_byClass.s.f = df_byClass.s %>%
filter(variables %in% vars)
# plotting
ggplot(df_byClass.s.f, aes(variables, mean_value, fill=method,
ymin=mean_value-sd_value,
ymax=mean_value+sd_value)) +
#geom_pointrange(alpha=0.8, size=0.2) +
geom_bar(stat='identity', position='dodge', width=0.8) +
geom_errorbar(stat='identity', position='dodge', width=0.8) +
scale_y_continuous(breaks=seq(0, 1, 0.2)) +
scale_fill_discrete('"Heavy" BD window(s)') +
facet_grid(percTaxa ~ percIncorp) +
theme_bw() +
theme(
text = element_text(size=16),
axis.title.x = element_blank(),
axis.text.x = element_text(angle=45, hjust=1),
axis.title.y = element_blank()
)
%%R -w 750 -h 550
# summarize by SIPSim rep & library rep
vars = c('Balanced Accuracy', 'Sensitivity', 'Specificity')
df_byClass.s.f = df_byClass.s %>%
filter(variables %in% vars) %>%
ungroup() %>%
mutate(percTaxa = percTaxa %>% as.character,
percTaxa = percTaxa %>% reorder(percTaxa %>% as.numeric))
# plotting
p.pnt = ggplot(df_byClass.s.f, aes(percIncorp, mean_value,
color=percTaxa,
group=percTaxa,
ymin=mean_value-sd_value,
ymax=mean_value+sd_value)) +
geom_point(alpha=0.8) +
geom_linerange(alpha=0.8, size=0.5) +
geom_line() +
scale_color_discrete('% incorp-\norators') +
labs(x='13C atom % excess') +
facet_grid(method ~ variables) +
theme_bw() +
theme(
text = element_text(size=16),
axis.title.y = element_blank()
)
p.pnt
%%R
outFile = 'atomIncorp_taxaIncorp_MW-HR-SIP.pdf'
ggsave(outFile, p.pnt, width=9, height=7.3)
cat('File written:', file.path(getwd(), outFile), '\n')
%%R -h 250 -w 650
df_byClass.sf = df_byClass %>%
filter(variables == 'Specificity')
max_val = max(df_byClass.sf$values, na.rm=TRUE)
ggplot(df_byClass.sf, aes(values)) +
geom_histogram() +
scale_y_log10() +
labs(x='Specificity') +
theme_bw() +
theme(
text = element_text(size=16)
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: BD min/max
Step2: Nestly
Step3: Nestly params
Step4: Copying input files
Step5: Multi-window HR-SIP
Step6: Making confusion matrices
Step7: Aggregating the confusion matrix data
Step8: --End of simulation--
Step9: Checking that specificity is not always 1 (perfect)
|
10,257
|
<ASSISTANT_TASK:>
Python Code:
import graphlab;
products = graphlab.SFrame('amazon_baby.gl/')
products.head()
products['word_count'] = graphlab.text_analytics.count_words(products['review'])
products.head()
graphlab.canvas.set_target('ipynb')
products['name'].show()
giraffe_reviews = products[products['name'] == 'Vulli Sophie the Giraffe Teether']
len(giraffe_reviews)
giraffe_reviews['rating'].show(view='Categorical')
products['rating'].show(view='Categorical')
#ignore all 3* reviews
products = products[products['rating'] != 3]
#positive sentiment = 4* or 5* reviews
products['sentiment'] = products['rating'] >=4
products.head()
train_data,test_data = products.random_split(.8, seed=0)
sentiment_model = graphlab.logistic_classifier.create(train_data,
target='sentiment',
features=['word_count'],
validation_set=test_data)
sentiment_model.evaluate(test_data, metric='roc_curve')
sentiment_model.show(view='Evaluation')
giraffe_reviews['predicted_sentiment'] = sentiment_model.predict(giraffe_reviews, output_type='probability')
giraffe_reviews.head()
giraffe_reviews = giraffe_reviews.sort('predicted_sentiment', ascending=False)
giraffe_reviews.head()
giraffe_reviews[0]['review']
giraffe_reviews[1]['review']
giraffe_reviews[-1]['review']
giraffe_reviews[-2]['review']
selected_words = ['awesome', 'great', 'fantastic', 'amazing', 'love', 'horrible', 'bad', 'terrible', 'awful', 'wow', 'hate']
products['word_count'] = graphlab.text_analytics.count_words(products['review'])
selected_words = ['awesome', 'great', 'fantastic', 'amazing', 'love', 'horrible', 'bad', 'terrible', 'awful', 'wow', 'hate']
def awesome_count(cell):
    """Return how many times the word 'hate' occurs in a word-count dict.

    Parameters
    ----------
    cell : dict
        Word-count mapping (word -> count), e.g. one entry of the
        'word_count' column.

    Returns
    -------
    int
        cell['hate'] if present, otherwise 0.

    NOTE(review): despite the name, this counts 'hate', not 'awesome' —
    the name looks like a copy/paste leftover from iterating over
    selected_words; confirm intent before renaming (callers use this name).
    """
    # dict.get replaces the original LBYL membership test: one lookup.
    return cell.get('hate', 0)
products['hate'] = products['word_count'].apply(awesome_count)
products.head()
train_data,test_data = products.random_split(.8, seed=0)
selected_words = ['awesome', 'great', 'fantastic', 'amazing', 'love', 'horrible', 'bad', 'terrible', 'awful', 'wow', 'hate']
selected_words_model = graphlab.logistic_classifier.create(train_data,target='sentiment',features=selected_words,validation_set=test_data, )
selected_words_model['coefficients'].sort('value', ascending = True)
selected_words_model.evaluate(test_data)
sentiment_model.evaluate(test_data)
giraffe_reviews['predicted_sentiment'] = sentiment_model.predict(giraffe_reviews, output_type='probability')
giraffe_reviews.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read some product review data
Step2: Let's explore this data together
Step3: Build the word count vector for each review
Step4: Examining the reviews for most-sold product
Step5: Build a sentiment classifier
Step6: Define what's a positive and a negative sentiment
Step7: Let's train the sentiment classifier
Step8: Evaluate the sentiment model
Step9: Applying the learned model to understand sentiment for Giraffe
Step10: Sort the reviews based on the predicted sentiment and explore
Step11: Most positive reviews for the giraffe
Step12: Show most negative reviews for giraffe
|
10,258
|
<ASSISTANT_TASK:>
Python Code:
import urllib
url = 'http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2009'
data = pd.read_csv(url, parse_dates=['Date'])
import bokeh.plotting as bp
# 주피터 노트북이 아닌 파일로 출력하는 경우
# bp.output_file("../images/msft_1.html", title="Bokeh Example (Static)")
# 주피터 노트북에서 실행하여 출력하는 경우
bp.output_notebook()
p = bp.figure(title='Historical Stock Quotes', # 플롯 제목
x_axis_type ='datetime', # x 축은 날짜 정보
tools = '')
p.line(
data['Date'], # x 좌표
data['Close'], # y 좌표
color ='#0066cc', # 선 색상
legend ='MSFT', # 범례 이름
)
bp.show(p)
p = bp.figure(title='Historical Stock Quotes', # 플롯 제목
x_axis_type ='datetime', # x 축은 날짜 정보
tools = 'pan, wheel_zoom, box_zoom, reset, previewsave')
p.line(
data['Date'], # x 좌표
data['Close'], # y 좌표
color ='#0066cc', # 선 색상
legend ='MSFT', # 범례 이름
)
bp.show(p)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Bokeh 라이브러리 임포트
Step2: 플롯팅
Step3: 다음으로 Figure 클래스의 메서드를 호출하여 실제 플롯 객체를 추가한다. 우선 라인 플롯을 그리기 위해 line 메서드을 실행한다.
Step4: 이제 show 명령어를 호출하여 실제 차트를 렌더링 한다.
Step5: 상호작용 툴 추가하기
|
10,259
|
<ASSISTANT_TASK:>
Python Code:
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.13'
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/trained_model; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical model if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight/trained_model gs://${BUCKET}/babyweight
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/
%%bash
MODEL_NAME="babyweight"
MODEL_VERSION="ml_on_gcp"
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/ | tail -1)
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
# Optional: Delete the version of the model if it already exists:
#gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ai-platform models delete ${MODEL_NAME}
# TODO: Create the model
gcloud ai-platform models create
# TODO: Create the model version
gcloud ai-platform versions create
from oauth2client.client import GoogleCredentials
import requests
import json
MODEL_NAME = 'babyweight'
MODEL_VERSION = 'ml_on_gcp'
token = GoogleCredentials.get_application_default().get_access_token().access_token
api = 'https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict' \
.format(PROJECT, MODEL_NAME, MODEL_VERSION)
headers = {'Authorization': 'Bearer ' + token }
data = {
'instances': [
{
'key': 'b1',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Single(1)',
'gestation_weeks': 39
},
{
'key': 'g1',
'is_male': 'False',
'mother_age': 29.0,
'plurality': 'Single(1)',
'gestation_weeks': 38
},
{
'key': 'b2',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Triplets(3)',
'gestation_weeks': 39
},
{
'key': 'u1',
'is_male': 'Unknown',
'mother_age': 29.0,
'plurality': 'Multiple(2+)',
'gestation_weeks': 38
},
]
}
response = requests.post(api, json=data, headers=headers)
print(response.content)
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json
OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs
gsutil cp inputs.json $INPUT
gsutil -m rm -rf $OUTPUT
gcloud ai-platform jobs submit prediction babypred_$(date -u +%y%m%d_%H%M%S) \
--data-format=TEXT --region ${REGION} \
--input-paths=$INPUT \
--output-path=$OUTPUT \
--model=babyweight --version=ml_on_gcp
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2> Deploy trained model </h2>
Step2: <h2> Use model to predict (online prediction) </h2>
Step3: The predictions for the four instances were
|
10,260
|
<ASSISTANT_TASK:>
Python Code:
from numpy import linalg as LA
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def generate_test_image(m,n):
X = np.zeros((m,n))
# generate a rectangle
X[25:80,25:80] = 1
# generate a triangle
for i in range(25, 80, 1):
X[i+80:160, 100+i-1] = 2
# generate a circle
for i in range(0,200,1):
for j in range(0,200,1):
if ((i - 135)*(i - 135) +(j - 53)*(j - 53) <= 900):
X[i, j] = 3
return X
X = generate_test_image(200,200)
imgplot = plt.imshow(X, cmap='gray')
plt.title('Original Test Image');
m = X.shape[0] # num of rows
n = X.shape[1] # num of columns
X = np.asarray(X, dtype=np.float64)
C = np.cov(X)
np.linalg.matrix_rank(C)
P, L = LA.eigh(C)
P = P[::-1]
L = L[:,::-1]
np.allclose(L.dot(np.diag(P)).dot(L.T), C)
plt.semilogy(P, '-o')
plt.xlim([1, P.shape[0]])
plt.xlabel('eigenvalue index')
plt.ylabel('eigenvalue in a log scale')
plt.title('Eigenvalues of Covariance Matrix');
V = L.T.dot(X)
V.shape
k = 200
X_tilde = L[:,0:k-1].dot(L[:,0:k-1].T).dot(X)
np.allclose(X_tilde, X)
plt.imshow(X_tilde, cmap='gray')
plt.title('Approximated Image with full rank');
(P/P.sum()).sum()
plt.plot((P/P.sum()).cumsum(), '-o')
plt.title('Cumulative Sum of the Proportion of Total Variance')
plt.xlabel('index')
plt.ylabel('Proportion');
X_tilde_10 = L[:,0:10-1].dot(V[0:10-1,:])
X_tilde_20 = L[:,0:20-1].dot(V[0:20-1,:])
X_tilde_30 = L[:,0:30-1].dot(V[0:30-1,:])
X_tilde_60 = L[:,0:60-1].dot(V[0:60-1,:])
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12, 12))
ax1.imshow(X_tilde_10, cmap='gray')
ax1.set(title='Approximated Image with k = 10')
ax2.imshow(X_tilde_20, cmap='gray')
ax2.set(title='Approximated Image with k = 20')
ax3.imshow(X_tilde_30, cmap='gray')
ax3.set(title='Approximated Image with k = 30')
ax4.imshow(X_tilde_60, cmap='gray')
ax4.set(title='Approximated Image with k = 60');
# Portfolio example: PCA on daily returns of 10 stocks.
symbol = ['IBM','MSFT', 'FB', 'T', 'INTC', 'ABX','NEM', 'AU', 'AEM', 'GFI']
start = "2015-09-01"
end = "2016-11-01"
# NOTE(review): `get_pricing` is the Quantopian research API -- this cell
# only runs inside that environment; confirm before porting.
portfolio_returns = get_pricing(symbol, start_date=start, end_date=end, fields="price").pct_change()[1:]
from sklearn.decomposition import PCA
num_pc = 2
X = np.asarray(portfolio_returns)
[n,m] = X.shape
# Python 2 print statements below.
print 'The number of timestamps is {}.'.format(n)
print 'The number of stocks is {}.'.format(m)
pca = PCA(n_components=num_pc) # number of principal components
pca.fit(X)
percentage = pca.explained_variance_ratio_
percentage_cum = np.cumsum(percentage)
print '{0:.2f}% of the variance is explained by the first 2 PCs'.format(percentage_cum[-1]*100)
pca_components = pca.components_
# Bar chart of each PC's variance contribution, plus the cumulative curve.
x = np.arange(1,len(percentage)+1,1)
plt.subplot(1, 2, 1)
plt.bar(x, percentage*100, align = "center")
plt.title('Contribution of principal components',fontsize = 16)
plt.xlabel('principal components',fontsize = 16)
plt.ylabel('percentage',fontsize = 16)
plt.xticks(x,fontsize = 16)
plt.yticks(fontsize = 16)
plt.xlim([0, num_pc+1])
plt.subplot(1, 2, 2)
plt.plot(x, percentage_cum*100,'ro-')
plt.xlabel('principal components',fontsize = 16)
plt.ylabel('percentage',fontsize = 16)
plt.title('Cumulative contribution of principal components',fontsize = 16)
plt.xticks(x,fontsize = 16)
plt.yticks(fontsize = 16)
plt.xlim([1, num_pc])
plt.ylim([50,100]);
# Factor returns: project the return matrix onto the principal components.
factor_returns = X.dot(pca_components.T)
factor_returns = pd.DataFrame(columns=["factor 1", "factor 2"],
                              index=portfolio_returns.index,
                              data=factor_returns)
factor_returns.head()
# Factor exposures: one loading per (stock, factor) pair.
factor_exposures = pd.DataFrame(index=["factor 1", "factor 2"],
                                columns=portfolio_returns.columns,
                                data = pca.components_).T
factor_exposures
labels = factor_exposures.index
data = factor_exposures.values
plt.subplots_adjust(bottom = 0.1)
plt.scatter(
    data[:, 0], data[:, 1], marker='o', s=300, c='m',
    cmap=plt.get_cmap('Spectral'))
plt.title('Scatter Plot of Coefficients of PC1 and PC2')
plt.xlabel('factor exposure of PC1')
plt.ylabel('factor exposure of PC2')
# Annotate each point with its ticker symbol.
for label, x, y in zip(labels, data[:, 0], data[:, 1]):
    plt.annotate(
        label,
        xy=(x, y), xytext=(-20, 20),
        textcoords='offset points', ha='right', va='bottom',
        bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
        arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0')
    );
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will introduce PCA with an image processing example. A grayscale digital image can be represented by a matrix whose $(i,j)^{th}$ entry corresponds to the measured gray level (intensity) of the pixel in row $i$ and column $j$.
Step2: We generate a simple synthetic test image containing a rectangle, a triangle, and a circle.
Step3: Set each row as a variable, with observations in the columns. Denote the covariance matrix of $\mathbf{X}$ as $\mathbf{C}$, where the size of $\mathbf{C}$ is $m \times m$. $\mathbf{C}$ is a matrix whose $(i,j)^{th}$ entry is the covariance between the $i^{th}$ row and $j^{th}$ row of the matrix $\mathbf{X}$.
Step4: Performing principal component analysis decomposes the matrix $\mathbf{C}$ into
Step5: The function LA.eigh lists the eigenvalues from small to large in $P$. Let us change the order first to list them from largest to smallest and make sure that $\mathbf{L}\mathbf{P}\mathbf{L}^{\top}==\mathbf{C}$.
Step6: Here we plot all of the eigenvalues
Step7: The $i^{th}$ principal component is given as $i^{th}$ row of $\mathbf{V}$,
Step8: If we multiply both sides on the left by $\mathbf{L}$, we get the following
Step9: The proportion of total variance due to the $i^{th}$ principal component is given by the ratio $\frac{\lambda_i}{\lambda_1 + \lambda_2 + \dots \lambda_m}.$ The sum of proportion of total variance should be $1$. As we defined, $\lambda_i$ is $i^{th}$ entry of $\mathbf{P}$,
Step10: Recall the number of principal components is denoted as $k$. Let $k$ be $10, 20, 30, 60$ as examples and take a look at the corresponding approximated images.
Step11: The number of variables in $X$ is $200$. When reducing the dimension to $k=60$, which uses half of the principal components, the approximated image is close to the original one.
Step12: Notice that the grand bulk of the variance of the returns of these assets can be explained by the first two principal components.
Step13: From these principal components we can construct "statistical risk factors", similar to more conventional common risk factors. These should give us an idea of how much of the portfolio's returns comes from some unobservable statistical feature.
Step14: The factor returns here are an analogue to the principal component matrix $\mathbf{V}$ in the image processing example.
Step15: The factor exposures are an analogue to the eigenvector matrix $\mathbf{L}$ in the image processing example.
|
10,261
|
<ASSISTANT_TASK:>
Python Code:
# Example: convolve a 32x32x3 input with twenty 8x8 filters at stride 2.
input = tf.placeholder(tf.float32, (None, 32, 32, 3))
filter_weights = tf.Variable(tf.truncated_normal((8, 8, 3, 20))) # (height, width, input_depth, output_depth)
filter_bias = tf.Variable(tf.zeros(20))
strides = [1, 2, 2, 1] # (batch, height, width, depth)
padding = 'VALID'
conv = tf.nn.conv2d(input, filter_weights, strides, padding) + filter_bias
# convo layer output layer shape:
# new_height = (input_height - filter_height + 2 * P)/S + 1
# new_width = (input_width - filter_width + 2 * P)/S + 1
((32 - 8 + 2*1) / 2 + 1), ((32 - 8 + 2*1) / 2 + 1), 20
# parameters in a convo layer
# without weight sharing: one full filter (+ bias) per output position
(8*8*3 +1) * (14*14*20)
# with weight sharing: one weight set per output channel, plus biases
((8*8*3)+1) * 20 + 20
# Minimal TF1 pipeline: conv -> bias -> ReLU -> max pool.
# Output depth
k_output = 64
# Image Properties
image_width = 10
image_height = 10
color_channels = 3
# Convolution filter
filter_size_width = 5
filter_size_height = 5
# Input/Image
input = tf.placeholder(
    tf.float32,
    shape=[None, image_height, image_width, color_channels])
# Weight and bias
weight = tf.Variable(tf.truncated_normal(
    [filter_size_height, filter_size_width, color_channels, k_output]))
bias = tf.Variable(tf.zeros(k_output))
# Apply Convolution
conv_layer = tf.nn.conv2d(input, weight, strides=[1, 2, 2, 1], padding='SAME')
# Add bias (conv2d does not include it)
conv_layer = tf.nn.bias_add(conv_layer, bias)
# Apply activation function
conv_layer = tf.nn.relu(conv_layer)
# Apply Max Pooling
conv_layer = tf.nn.max_pool(
    conv_layer,
    ksize=[1, 2, 2, 1],
    strides=[1, 2, 2, 1],
    padding='SAME')
# 2x2, stride-2 max pooling over a (None, 4, 4, 5) input.
input = tf.placeholder(tf.float32, (None, 4, 4, 5))
filter_shape = [1, 2, 2, 1]  # pooling window per dimension (batch, h, w, depth)
strides = [1, 2, 2, 1]
padding = 'VALID'
pool = tf.nn.max_pool(input, filter_shape, strides, padding)
Setup the strides, padding and filter weight/bias such that
the output shape is (1, 2, 2, 3).
# Quiz fixture: a single 4x4 one-channel image as a constant tensor.
import tensorflow as tf
import numpy as np
# `tf.nn.conv2d` requires the input be 4D (batch_size, height, width, depth)
# (1, 4, 4, 1)
x = np.array([
    [0, 1, 0.5, 10],
    [2, 2.5, 1, -8],
    [4, 0, 5, 6],
    [15, 1, 2, 3]], dtype=np.float32).reshape((1, 4, 4, 1))
X = tf.constant(x)
x.shape
def conv2d(input):
    """Apply a trainable 2x2 convolution with output depth 3, stride 2,
    and SAME padding to a 4D input tensor.

    `tf.nn.conv2d` does not include the bias computation, so the bias is
    added explicitly after the convolution.
    """
    # Filter weight shape: (height, width, input_depth, output_depth);
    # bias shape: (output_depth,). Both wrapped in tf.Variable so they train.
    weights = tf.Variable(tf.truncated_normal([2, 2, 1, 3]))
    bias = tf.Variable(tf.zeros(3))
    # Stride per dimension: (batch_size, height, width, depth).
    conv = tf.nn.conv2d(input, weights, [1, 2, 2, 1], 'SAME')
    return conv + bias
out = conv2d(X)
# udacity's solution
def conv2d(input):
    """Reference solution: trainable 2x2 convolution, output depth 3,
    stride 2, VALID padding; bias added explicitly after tf.nn.conv2d."""
    kernel = tf.Variable(tf.truncated_normal((2, 2, 1, 3)))  # (height, width, input_depth, output_depth)
    offset = tf.Variable(tf.zeros(3))                        # (output_depth,)
    return tf.nn.conv2d(input, kernel, [1, 2, 2, 1], 'VALID') + offset
# VALID-padding output size: ceil((in - filter + 1) / stride).
# NOTE(review): relies on `math` being imported in an earlier notebook
# cell -- the import is not visible here; confirm.
out_height = math.ceil(float(4 - 2 + 1) / float(2))
out_width = math.ceil(float(4 - 2 + 1) / float(2))
out_height, out_width
Set the values to `strides` and `ksize` such that
the output shape after pooling is (1, 2, 2, 1).
# Same 4x4 fixture, reused for the pooling quiz.
# `tf.nn.max_pool` requires the input be 4D (batch_size, height, width, depth)
# (1, 4, 4, 1)
x = np.array([
    [0, 1, 0.5, 10],
    [2, 2.5, 1, -8],
    [4, 0, 5, 6],
    [15, 1, 2, 3]], dtype=np.float32).reshape((1, 4, 4, 1))
X = tf.constant(x)
def maxpool(input):
    """2x2 max pooling with stride 2 and VALID padding.

    Shrinks the (1, 4, 4, 1) quiz input to (1, 2, 2, 1): each output value
    is the maximum of a non-overlapping 2x2 window.
    """
    window = [1, 2, 2, 1]  # ksize per dimension (batch_size, height, width, depth)
    steps = [1, 2, 2, 1]   # stride per dimension (batch_size, height, width, depth)
    return tf.nn.max_pool(input, window, steps, 'VALID')
out = maxpool(X)
# udacity solution
def maxpool(input):
    """Reference solution: 2x2, stride-2, VALID max pooling -> (1, 2, 2, 1)."""
    ksize = [1, 2, 2, 1]  # pooling window per dimension (batch_size, height, width, depth)
    strides = [1, 2, 2, 1]  # stride per dimension
    padding = 'VALID'
    return tf.nn.max_pool(input, ksize, strides, padding)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: calculate the number of parameters of a convo layer
Step2: The output layer shape is
Step3: There are 756,560 total parameters. That's a HUGE amount! Here's how we calculate it
Step4: simple cnn in tf
Step5: Max pooling
Step6: Recently, pooling layers have fallen out of favor. Some reasons are
Step8: 1x1 convulutions
Step9: Calculate the output height and width using the formula
Step11: using a pooling layer in tensorflow
|
10,262
|
<ASSISTANT_TASK:>
Python Code:
# Tour of the `requests` API: every call below does live network I/O.
import requests
r = requests.get('https://www.baidu.com/')
print(type(r))
print(r.status_code)
print(type(r.text))
print(r.headers)
print(r.text)
print(r.cookies)
# One helper per HTTP verb; httpbin echoes the request back.
r = requests.post('http://httpbin.org/post')
print('----POST----\n', r.text)
r = requests.put('http://httpbin.org/put')
print('----PUT----\n', r.text)
r = requests.delete('http://httpbin.org/delete')
print('----DELETE----\n', r.text)
r = requests.head('http://httpbin.org/get')
print('----HEAD----\n', r.text)
r = requests.options('http://httpbin.org/get')
print('----OPTIONS----\n', r.text)
r = requests.get('http://httpbin.org/get')
print(r.text)
# `params` is URL-encoded into the query string.
data = {
    'name': 'germey',
    'age': 22
}
r = requests.get("http://httpbin.org/get", params=data)
print(r.text)
# r.json() parses the JSON body into a dict.
r = requests.get("http://httpbin.org/get")
print('type(r.text) : ', type(r.text), '\n')
print('r.json() : ', r.json(), '\n')
print('type(r.json()) : ', type(r.json()), '\n')
# Scrape question titles from Zhihu with a browser-like User-Agent.
import requests
import re
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
}
r = requests.get("https://www.zhihu.com/explore", headers=headers)
pattern = re.compile('explore-feed.*?question_link.*?>(.*?)</a>', re.S)
titles = re.findall(pattern, r.text)
print(titles)
# Binary responses: r.text decodes (garbling binary), r.content is raw bytes.
r = requests.get("https://github.com/favicon.ico")
print(r.text)
print(r.content)
with open('data/favicon.ico', 'wb') as f:
    f.write(r.content)
    # NOTE(review): f.close() is redundant inside a `with` block.
    f.close()
r = requests.get("https://www.zhihu.com/explore")
print(r.text)
# POST with form data.
import requests
data = {'name': 'germey', 'age': '22'}
r = requests.post("http://httpbin.org/post", data=data)
print(r.text)
# Response attributes and their types.
r = requests.get('http://www.jianshu.com')
print(type(r.status_code), r.status_code)
print(type(r.headers), r.headers)
print(type(r.cookies), r.cookies)
print(type(r.url), r.url)
print(type(r.history), r.history)
# Compare the status code against the built-in success code.
r = requests.get('http://www.jianshu.com')
exit() if not r.status_code == requests.codes.ok else print('Request Successfully')
# File upload: files appear under the `files` field of the echoed request.
files = {'file': open('data/favicon.ico', 'rb')}
r = requests.post("http://httpbin.org/post", files=files)
print(r.text)
# Cookies, sessions, SSL verification, timeouts, and basic auth.
import requests
r = requests.get("https://www.baidu.com")
print(r.cookies)
for key, value in r.cookies.items():
    print(key + '=' + value)
# Sending a captured Cookie header directly to stay logged in.
headers = {
    'Cookie': '_za=dc07d0bb-599c-46e9-8906-f6dd252910b4; d_c0="AIAABz1hvQmPTup3qtT92xkZN2UQoova_cc=|1460107885"; _zap=3c9b8860-2a38-4532-b476-c3617ff3fb0d; _ga=GA1.2.1945944829.1442545470; aliyungf_tc=AQAAANoM4BIxkAMAj67seLZyZFoQgALT; q_c1=a366104786da4623b23af9cd321e4d38|1484577173000|1468254315000; _xsrf=b9564d003dd4bdb7b6f9e6185b8a0b78; l_cap_id="MTZlMjVmODgwYTVlNGEyYzg1ZDBkNzhkMzNjYjMxYmI=|1484577173|1c7374494b0e1a7ef526ea93c7065b2a8b9736eb"; cap_id="MzU3OGUzZjYwYjIwNGNmNWFhMTk2OGU2NjRjOWE3MDk=|1484577173|b5723f8256e35bd66f86d1e070c3a3d4324c655a"; r_cap_id="YmQwYmEwNDIxMDYxNDBkZmI2NzAzNjI2ZjJkMzNmOGQ=|1484577175|01cabbd0c7a481a7e4a907ae60ba3e2c64d07ddc"; login="NWVhMDVlNzZjMzI0NGE3ZGIyMmExODFhNzEzNGVhNmY=|1484577185|45503ef9b8dc4c3468f776c202fbc7fe442f8521"; n_c=1; z_c0=Mi4wQUFDQXhFMGpBQUFBZ0FBSFBXRzlDUmNBQUFCaEFsVk5vV2FrV0FBQkU5RmpvbXljZE15a2FZNU8zQVRjS0g1Qm1n|1484577189|3dc63f1a6bd389c32162f4d901ebf10e6750dffc; nweb_qa=heifetz; __utma=51854390.1945944829.1442545470.1484577176.1484577176.1; __utmb=51854390.0.10.1484577176; __utmc=51854390; __utmz=51854390.1484577176.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __utmv=51854390.100-1|2=registration_date=20140101=1^3=entry_date=20140101=1',
    'Host': 'www.zhihu.com',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36',
}
r = requests.get("http://www.zhihu.com", headers=headers)
print(r.text)
# Separate requests do not share cookies...
requests.get('http://httpbin.org/cookies/set/number/123456789')
r = requests.get('http://httpbin.org/cookies')
print(r.text)
# ...but a Session keeps the cookie jar across calls.
s = requests.Session()
s.get('http://httpbin.org/cookies/set/number/123456789')
r = s.get('http://httpbin.org/cookies')
print(r.text)
# SSL certificate verification (first call raises/warns without verify=False).
import requests
response = requests.get('https://www.12306.cn')
print(response.status_code)
response = requests.get('https://www.12306.cn', verify=False)
print(response.status_code)
# Silence the InsecureRequestWarning that verify=False produces.
import requests
from requests.packages import urllib3
urllib3.disable_warnings()
response = requests.get('https://www.12306.cn', verify=False)
print(response.status_code)
# Timeouts: a single value, a (connect, read) pair, or None for no limit.
import requests
r = requests.get("https://www.taobao.com", timeout = 1)
print(r.status_code)
r = requests.get('https://www.taobao.com', timeout=(5, 30))
print(r.status_code)
r = requests.get('https://www.taobao.com', timeout=None)
print(r.status_code)
r = requests.get('https://www.taobao.com')
print(r.status_code)
# HTTP basic authentication, explicit and via the (user, pass) shorthand.
import requests
from requests.auth import HTTPBasicAuth
r = requests.get('http://120.27.34.24:9001', auth=HTTPBasicAuth('user', '123'))
print(r.status_code)
import requests
r = requests.get('http://120.27.34.24:9001', auth=('user', '123'))
print(r.status_code)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 请求响应的类型是 requests.models.Response
Step2: 状态码是200
Step3: 响应体的类型是字符串str
Step4: 可以得到响应的 HTTP HEADER
Step5: 响应体内容
Step6: Cookies的类型是RequestsCookieJar
Step7: 我们可以发现,使用了 requests.get(url) 方法就成功实现了一个 GET 请求。这倒不算什么,更方便的在于其他的请求类型依然可以用一句话来完成。
Step8: 其实这只是 requests 库的冰山一角,更多的还在后面呢。
Step9: 可以发现我们成功发起了get请求,请求的链接和头信息都有相应的返回。
Step10: 通过返回信息我们可以判断,请求的链接自动被构造成了 http
Step11: 但注意,如果返回结果不是Json格式,便会出现解析错误,抛出 json.decoder.JSONDecodeError 的异常。
Step12: 如上代码,我们请求了知乎-发现页面 https
Step13: 在这里打印了 response 的两个属性,一个是 text,另一个是 content 。前两行便是r.text的结果,最后一行是r.content的结果。
Step14: 在这里用了open() 函数,第一个参数是文件名称,第二个参数代表以二进制写的形式打开,可以向文件里写入二进制数据,然后保存。
Step15: 基本POST请求
Step16: 可以发现,成功获得了返回结果,返回结果中的form部分就是提交的数据,那么这就证明POST请求成功发送了。
Step17: 在这里分别打印输出了响应状态吗status_code,响应头headers,Cookies,请求连接,请求历史的类型和内容。可以看到,headers还有cookies这两个部分都是特定的数据结构,打开浏览器同样可以发现有同样的响应头信息。
Step18: 在这里,通过比较返回码和内置的成功的返回码是一致的,来保证请求得到了正常响应,输出成功请求的消息,否则程序终止。
Step19: 这个网站会返回一个响应,里面包含files这个字段,而form是空的,这证明文件上传部分,会单独有一个files来标识。
Step20: 可以看到,首先打印输出了cookie,可以发现它是一个RequestCookieJar类型。然后用items()方法将其转化为元组组成的列表,遍历输出每一个cookie的名和值。
Step21: 会话维持
Step22: 并不行。那这时候我们想起刚才说的Session了,改成这个试试看:
Step23: 成功获取!所以,利用Session我们可以做到模拟同一个会话,而且不用担心Cookie的问题,通常用于模拟登录成功之后再进行下一步的操作。
Step24: 提示一个错误,叫做SSLError,证书验证错误。
Step25: 不过发现报了一个警告,它提示建议让我们给它指定证书。
Step26: 不过这不是最好的方式,https协议的请求把证书验证都忽略了还有什么意义?
Step27: 通过这样的方式,我们可以将超时时间设置为1秒,如果1秒内没有响应,那就抛出异常。
Step28: 如果想永久等待,那么你可以直接将timeout设置为None,或者不设置,直接留空,因为默认是None。这样的话,如果服务器还在运行,但是响应特别慢,那就慢慢等吧,它永远不会返回超时错误的。
Step29: 或直接不加参数:
Step30: 身份认证
Step31: 如果用户名和密码正确的话,认证成功,那么运行结果会返回200,如果认证失败,则会返回401状态码。
|
10,263
|
<ASSISTANT_TASK:>
Python Code:
!pip install -q tensorflow-recommenders
!pip install -q --upgrade tensorflow-datasets
# You can use any Python source file as a module by executing an import statement in some other Python source file.
# The import statement combines two operations; it searches for the named module, then it binds the
# results of that search to a name in the local scope.
import os
import pprint
import tempfile
from typing import Dict, Text
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_recommenders as tfrs
# Show the currently installed version of TensorFlow
print("TensorFlow version: ",tf.version.VERSION)
ratings = tfds.load("movielens/100k-ratings", split="train")
ratings = ratings.map(lambda x: {
"movie_title": x["movie_title"],
"user_id": x["user_id"],
"user_rating": x["user_rating"]
})
tf.random.set_seed(42)
shuffled = ratings.shuffle(100_000, seed=42, reshuffle_each_iteration=False)
# TODO 1a -- your code goes here
movie_titles = ratings.batch(1_000_000).map(lambda x: x["movie_title"])
user_ids = ratings.batch(1_000_000).map(lambda x: x["user_id"])
unique_movie_titles = np.unique(np.concatenate(list(movie_titles)))
unique_user_ids = np.unique(np.concatenate(list(user_ids)))
class RankingModel(tf.keras.Model):
    """Two-tower ranking model: embeds user and movie, then predicts a rating."""

    def __init__(self):
        super().__init__()
        embedding_dimension = 32
        # Compute embeddings for users: map the raw string id to an integer
        # index, then look up a dense embedding. The previous TODO left these
        # towers undefined, so call() raised AttributeError; the +1 in the
        # embedding size accounts for the StringLookup OOV bucket.
        self.user_embeddings = tf.keras.Sequential([
            tf.keras.layers.StringLookup(
                vocabulary=unique_user_ids, mask_token=None),
            tf.keras.layers.Embedding(len(unique_user_ids) + 1,
                                      embedding_dimension)
        ])
        # Compute embeddings for movies, analogously to the users.
        self.movie_embeddings = tf.keras.Sequential([
            tf.keras.layers.StringLookup(
                vocabulary=unique_movie_titles, mask_token=None),
            tf.keras.layers.Embedding(len(unique_movie_titles) + 1,
                                      embedding_dimension)
        ])
        # Compute predictions.
        self.ratings = tf.keras.Sequential([
            # Learn multiple dense layers.
            tf.keras.layers.Dense(256, activation="relu"),
            tf.keras.layers.Dense(64, activation="relu"),
            # Make rating predictions in the final layer.
            tf.keras.layers.Dense(1)
        ])

    def call(self, inputs):
        """Return a predicted rating for each (user_id, movie_title) pair."""
        user_id, movie_title = inputs
        user_embedding = self.user_embeddings(user_id)
        movie_embedding = self.movie_embeddings(movie_title)
        # Concatenate both embeddings and push them through the dense tower.
        return self.ratings(tf.concat([user_embedding, movie_embedding], axis=1))
RankingModel()((["42"], ["One Flew Over the Cuckoo's Nest (1975)"]))
# Standalone demonstration of the ranking task: it bundles the MSE training
# loss with an RMSE evaluation metric.
task = tfrs.tasks.Ranking(
    loss = tf.keras.losses.MeanSquaredError(),
    metrics=[tf.keras.metrics.RootMeanSquaredError()]
)
class MovielensModel(tfrs.models.Model):
    """Full TFRS model: wraps RankingModel with a ranking (regression) task."""

    def __init__(self):
        super().__init__()
        # Sub-model that produces the rating predictions.
        self.ranking_model: tf.keras.Model = RankingModel()
        # The ranking task pairs the MSE loss with an RMSE metric.
        self.task: tf.keras.layers.Layer = tfrs.tasks.Ranking(
            loss=tf.keras.losses.MeanSquaredError(),
            metrics=[tf.keras.metrics.RootMeanSquaredError()])

    def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
        """Compute the loss (and update metrics) for one batch of features."""
        predictions = self.ranking_model(
            (features["user_id"], features["movie_title"]))
        # The task computes the loss and the metrics.
        return self.task(labels=features["user_rating"],
                         predictions=predictions)
# Compile, cache the datasets, fit, and evaluate the ranking model.
model = MovielensModel()
model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.1))
# Shuffle/batch/cache once so epochs after the first read from memory.
cached_train = train.shuffle(100_000).batch(8192).cache()
cached_test = test.batch(4096).cache()
model.fit(cached_train, epochs=3)
# TODO 3a: evaluate on the held-out data (was left unimplemented).
model.evaluate(cached_test, return_dict=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: This notebook uses TF2.x.
Step3: Lab Task 1
Step4: As before, we'll split the data by putting 80% of the ratings in the train set, and 20% in the test set.
Step5: Let's also figure out unique user ids and movie titles present in the data.
Step6: Lab Task 2
Step7: This model takes user ids and movie titles, and outputs a predicted rating
Step8: Loss and metrics
Step9: The task itself is a Keras layer that takes true and predicted as arguments, and returns the computed loss. We'll use that to implement the model's training loop.
Step10: Lab Task 3
Step11: Then shuffle, batch, and cache the training and evaluation data.
Step12: Then train the model
Step13: As the model trains, the loss is falling and the RMSE metric is improving.
|
10,264
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
# for quick visualization in notebook
import matplotlib.pyplot as plt
%matplotlib inline
# Generate the classic 3-class spiral toy dataset (Python 2: xrange/print).
N = 100 # number of points per class
D = 2 # dimensionality
K = 3 # number of classes
X = np.zeros((N*K,D)) # data matrix (each row = single example)
y = np.zeros(N*K, dtype='uint8') # class labels
for j in xrange(K):
    ix = range(N*j,N*(j+1))
    r = np.linspace(0.0,1,N) # radius
    t = np.linspace(j*4,(j+1)*4,N) + np.random.randn(N)*0.2 # theta
    X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
    y[ix] = j
# lets visualize the data:
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.xlim([-1,1])
plt.ylim([-1,1])
print np.c_.__doc__
# Step-by-step softmax classifier: one forward pass, loss, and one update.
# initialize parameters randomly
W = 0.01 * np.random.randn(D,K)
b = np.zeros((1,K))
num_examples = N*K
# compute class scores for a linear classifier
scores = np.dot(X, W) + b
print(scores.shape)
print(scores[50])
# some hyperparameters
step_size = 1e-0
reg = 1e-3 # regularization strength
# get unnormalized probabilities
exp_scores = np.exp(scores)
# normalize them for each example
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
print(probs.shape)
print(probs[50])
print(range(4))
# log-probability of the correct class for every example
correct_logprobs = -np.log(probs[range(N*K),y])
print(correct_logprobs.shape)
# compute the loss: average cross-entropy loss and regularization
data_loss = np.sum(correct_logprobs)/num_examples
reg_loss = 0.5*reg*np.sum(W*W)
loss = data_loss + reg_loss
print(loss)
# gradient on the scores: (probs - 1) for the correct class, averaged
dscores = probs
dscores[range(num_examples),y] -= 1
dscores /= num_examples
print(dscores.shape)
dW = np.dot(X.T, dscores)
db = np.sum(dscores, axis=0, keepdims=True)
dW += reg*W # don't forget the regularization gradient
# perform a parameter update
W += -step_size * dW
b += -step_size * db
# Full softmax-classifier training loop (Python 2: xrange/print statements).
#Train a Linear Classifier
# initialize parameters randomly
W = 0.01 * np.random.randn(D,K)
b = np.zeros((1,K))
# some hyperparameters
step_size = 1e-0
reg = 1e-3 # regularization strength
# gradient descent loop
num_examples = X.shape[0]
for i in xrange(200):
    # evaluate class scores, [N x K]
    scores = np.dot(X, W) + b
    # compute the class probabilities
    exp_scores = np.exp(scores)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True) # [N x K]
    # compute the loss: average cross-entropy loss and regularization
    corect_logprobs = -np.log(probs[range(num_examples),y])
    data_loss = np.sum(corect_logprobs)/num_examples
    reg_loss = 0.5*reg*np.sum(W*W)
    loss = data_loss + reg_loss
    if i % 10 == 0:
        print "iteration %d: loss %f" % (i, loss)
    # compute the gradient on scores
    dscores = probs
    dscores[range(num_examples),y] -= 1
    dscores /= num_examples
    # backpropate the gradient to the parameters (W,b)
    dW = np.dot(X.T, dscores)
    db = np.sum(dscores, axis=0, keepdims=True)
    dW += reg*W # regularization gradient
    # perform a parameter update
    W += -step_size * dW
    b += -step_size * db
# evaluate training-set accuracy with the learned parameters
scores = np.dot(X, W) + b
predicted_class = np.argmax(scores, axis=1)
print 'training accuracy: %.2f' % (np.mean(predicted_class == y))
# Evaluate the linear classifier on a dense grid to draw decision regions.
# plot the resulting classifier
h = 0.02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# score every grid point, pick the winning class, reshape back to the grid
Z = np.dot(np.c_[xx.ravel(), yy.ravel()], W) + b
Z = np.argmax(Z, axis=1)
Z = Z.reshape(xx.shape)
fig = plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
#fig.savefig('spiral_linear.png')
# Forward/backward pieces of a 2-layer net, shown before the full loop.
# NOTE(review): this cell reuses `dscores` from the previous training loop;
# the ReLU backward step (zeroing dhidden where hidden_layer <= 0) appears
# only in the complete loop below.
# initialize parameters randomly
h = 100 # size of hidden layer
W = 0.01 * np.random.randn(D,h)
b = np.zeros((1,h))
W2 = 0.01 * np.random.randn(h,K)
b2 = np.zeros((1,K))
# evaluate class scores with a 2-layer Neural Network
hidden_layer = np.maximum(0, np.dot(X, W) + b) # note, ReLU activation
scores = np.dot(hidden_layer, W2) + b2
# backpropate the gradient to the parameters
# first backprop into parameters W2 and b2
dW2 = np.dot(hidden_layer.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
dhidden = np.dot(dscores, W2.T)
# finally into W,b
dW = np.dot(X.T, dhidden)
db = np.sum(dhidden, axis=0, keepdims=True)
# Complete training loop for the 2-layer (ReLU) network.
# initialize parameters randomly
h = 100 # size of hidden layer
W = 0.01 * np.random.randn(D,h)
b = np.zeros((1,h))
W2 = 0.01 * np.random.randn(h,K)
b2 = np.zeros((1,K))
# some hyperparameters
step_size = 1e-0
reg = 1e-3 # regularization strength
# gradient descent loop
num_examples = X.shape[0]
for i in xrange(10000):
    # evaluate class scores, [N x K]
    hidden_layer = np.maximum(0, np.dot(X, W) + b) # note, ReLU activation
    scores = np.dot(hidden_layer, W2) + b2
    # compute the class probabilities
    exp_scores = np.exp(scores)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True) # [N x K]
    # compute the loss: average cross-entropy loss and regularization
    corect_logprobs = -np.log(probs[range(num_examples),y])
    data_loss = np.sum(corect_logprobs)/num_examples
    reg_loss = 0.5*reg*np.sum(W*W) + 0.5*reg*np.sum(W2*W2)
    loss = data_loss + reg_loss
    if i % 1000 == 0:
        print "iteration %d: loss %f" % (i, loss)
    # compute the gradient on scores
    dscores = probs
    dscores[range(num_examples),y] -= 1
    dscores /= num_examples
    # backpropate the gradient to the parameters
    # first backprop into parameters W2 and b2
    dW2 = np.dot(hidden_layer.T, dscores)
    db2 = np.sum(dscores, axis=0, keepdims=True)
    # next backprop into hidden layer
    dhidden = np.dot(dscores, W2.T)
    # backprop the ReLU non-linearity
    dhidden[hidden_layer <= 0] = 0
    # finally into W,b
    dW = np.dot(X.T, dhidden)
    db = np.sum(dhidden, axis=0, keepdims=True)
    # add regularization gradient contribution
    dW2 += reg * W2
    dW += reg * W
    # perform a parameter update
    W += -step_size * dW
    b += -step_size * db
    W2 += -step_size * dW2
    b2 += -step_size * db2
# NOTE(review): this prints the *linear* model's predictions from the
# earlier cell, not this network's -- confirm the intent.
print(predicted_class.shape)
# evaluate training set accuracy
hidden_layer = np.maximum(0, np.dot(X, W) + b)
scores = np.dot(hidden_layer, W2) + b2
predicted_class = np.argmax(scores, axis=1)
print 'training accuracy: %.2f' % (np.mean(predicted_class == y))
# write forward pass into predict function
def predict(X):
    """Forward pass of the trained 2-layer network.

    Input: X is matrix of NxD with N samples each of dimension D
    Output: predicted_class is vector of length N (1 prediction per sample)

    Uses the trained module-level parameters W, b, W2, b2. (The original
    docstring had lost its triple quotes, making these lines invalid syntax.)
    """
    hidden_layer = np.maximum(0, np.dot(X, W) + b)  # ReLU activation
    scores = np.dot(hidden_layer, W2) + b2
    # predicted class = argmax over the K class scores
    predicted_class = np.argmax(scores, axis=1)
    return predicted_class
# find arg across k where scores is max
Z = np.argmax(scores, axis=1) # class predictions 0,1,2
# NOTE(review): `idx` is not defined in this cell -- presumably set in an
# earlier notebook cell; confirm before running standalone.
print(Z.shape, Z.size, Z[idx])
# Regenerate the spiral dataset for the final decision-region plot.
N = 100 # number of points per class
D = 2 # dimensionality
K = 3 # number of classes
X = np.zeros((N*K,D)) # data matrix (each row = single example)
y = np.zeros(N*K, dtype='uint8') # class labels
for j in xrange(K):
    ix = range(N*j,N*(j+1))
    r = np.linspace(0.0,1,N) # radius
    t = np.linspace(j*4,(j+1)*4,N) + np.random.randn(N)*0.2 # theta
    X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
    y[ix] = j
# lets visualize the data:
fig = plt.figure()
fig.set_size_inches(10,7)
# Put the probability scores into a color plot with training samples on it
# Plotting decision regions
h=0.02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
                     np.arange(y_min, y_max, 0.1))
# classify every grid point with the trained network
Z = predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.8, cmap=plt.cm.Spectral)
# plot training samples
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
#fig.savefig('spiral_net.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Training a softmax linear classifier
Step2: Softmax loss using cross-entropy
Step3: We now have an array probs of size [300 x 3], where each row now contains the class probabilities. In particular, since we’ve normalized them every row now sums to one. We can now query for the log probabilities assigned to the correct classes in each example
Step4: Evaluating this in the beginning (with random parameters) might give us loss = 1.1, which is -np.log(1.0/3), since with small initial random weights all probabilities assigned to all classes are about one third. We now want to make the loss as low as possible, with loss = 0 as the absolute lower bound. But the lower the loss is, the higher are the probabilities assigned to the correct classes for all examples.
Step5: Lets update parameters
Step6: Full code
Step7: Training set accuracy
Step8: Training a 1-layer neural network
Step10: Full code
Step11: plot the decision boundaries
|
10,265
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Luke Bloy <luke.bloy@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
from mne.filter import next_fast_len
import mne
print(__doc__)
# Resolve dataset paths, then load and resample the VectorView and OPM
# recordings together with their empty-room runs.
data_path = mne.datasets.opm.data_path()
subject = 'OPM_sample'
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
bem_fname = op.join(subjects_dir, subject, 'bem',
                    subject + '-5120-5120-5120-bem-sol.fif')
src_fname = op.join(bem_dir, '%s-oct6-src.fif' % subject)
vv_fname = data_path + '/MEG/SQUID/SQUID_resting_state.fif'
vv_erm_fname = data_path + '/MEG/SQUID/SQUID_empty_room.fif'
vv_trans_fname = data_path + '/MEG/SQUID/SQUID-trans.fif'
opm_fname = data_path + '/MEG/OPM/OPM_resting_state_raw.fif'
opm_erm_fname = data_path + '/MEG/OPM/OPM_empty_room_raw.fif'
opm_trans_fname = None
opm_coil_def_fname = op.join(data_path, 'MEG', 'OPM', 'coil_def.dat')
# Recordings are keyed by system: 'vv' (VectorView) or 'opm'.
raws = dict()
raw_erms = dict()
new_sfreq = 90.  # Nyquist frequency (45 Hz) < line noise freq (50 Hz)
raws['vv'] = mne.io.read_raw_fif(vv_fname, verbose='error')  # ignore naming
raws['vv'].load_data().resample(new_sfreq)
raws['vv'].info['bads'] = ['MEG2233', 'MEG1842']
raw_erms['vv'] = mne.io.read_raw_fif(vv_erm_fname, verbose='error')
raw_erms['vv'].load_data().resample(new_sfreq)
raw_erms['vv'].info['bads'] = ['MEG2233', 'MEG1842']
raws['opm'] = mne.io.read_raw_fif(opm_fname)
raws['opm'].load_data().resample(new_sfreq)
raw_erms['opm'] = mne.io.read_raw_fif(opm_erm_fname)
raw_erms['opm'].load_data().resample(new_sfreq)
# Make sure our assumptions later hold
assert raws['opm'].info['sfreq'] == raws['vv'].info['sfreq']
titles = dict(vv='VectorView', opm='OPM')
# ECG/EOG SSP artifact projectors (VectorView only), then PSD inspection.
ssp_ecg, _ = mne.preprocessing.compute_proj_ecg(
    raws['vv'], tmin=-0.1, tmax=0.1, n_grad=1, n_mag=1)
raws['vv'].add_proj(ssp_ecg, remove_existing=True)
# due to how compute_proj_eog works, it keeps the old projectors, so
# the output contains both projector types (and also the original empty-room
# projectors)
ssp_ecg_eog, _ = mne.preprocessing.compute_proj_eog(
    raws['vv'], n_grad=1, n_mag=1, ch_name='MEG0112')
raws['vv'].add_proj(ssp_ecg_eog, remove_existing=True)
raw_erms['vv'].add_proj(ssp_ecg_eog)
# Show the last four projector topographies.
fig = mne.viz.plot_projs_topomap(raws['vv'].info['projs'][-4:],
                                 info=raws['vv'].info)
fig.suptitle(titles['vv'])
fig.subplots_adjust(0.05, 0.05, 0.95, 0.85)
# Power spectra for both systems (FFT length ~4 s of data).
kinds = ('vv', 'opm')
n_fft = next_fast_len(int(round(4 * new_sfreq)))
print('Using n_fft=%d (%0.1f sec)' % (n_fft, n_fft / raws['vv'].info['sfreq']))
for kind in kinds:
    fig = raws[kind].plot_psd(n_fft=n_fft, proj=True)
    fig.suptitle(titles[kind])
    fig.subplots_adjust(0.1, 0.1, 0.95, 0.85)
# Source space, BEM, coregistration check, and forward solutions.
# Here we use a reduced size source space (oct5) just for speed
src = mne.setup_source_space(
    subject, 'oct5', add_dist=False, subjects_dir=subjects_dir)
# This line removes source-to-source distances that we will not need.
# We only do it here to save a bit of memory, in general this is not required.
del src[0]['dist'], src[1]['dist']
bem = mne.read_bem_solution(bem_fname)
fwd = dict()
trans = dict(vv=vv_trans_fname, opm=opm_trans_fname)
# check alignment and generate forward
# (the custom OPM coil definitions are needed for the OPM forward model)
with mne.use_coil_def(opm_coil_def_fname):
    for kind in kinds:
        dig = True if kind == 'vv' else False
        fig = mne.viz.plot_alignment(
            raws[kind].info, trans=trans[kind], subject=subject,
            subjects_dir=subjects_dir, dig=dig, coord_frame='mri',
            surfaces=('head', 'white'))
        mne.viz.set_3d_view(figure=fig, azimuth=0, elevation=90,
                            distance=0.6, focalpoint=(0., 0., 0.))
        fwd[kind] = mne.make_forward_solution(
            raws[kind].info, trans[kind], src, bem, eeg=False, verbose=True)
del trans, src, bem
# Minimum-norm inverse + source PSD, normalized per frequency band.
freq_bands = dict(
    delta=(2, 4), theta=(5, 7), alpha=(8, 12), beta=(15, 29), gamma=(30, 45))
topos = dict(vv=dict(), opm=dict())
stcs = dict(vv=dict(), opm=dict())
snr = 3.
lambda2 = 1. / snr ** 2  # regularization from the assumed SNR
for kind in kinds:
    # Noise covariance from the matching empty-room recording.
    noise_cov = mne.compute_raw_covariance(raw_erms[kind])
    inverse_operator = mne.minimum_norm.make_inverse_operator(
        raws[kind].info, forward=fwd[kind], noise_cov=noise_cov, verbose=True)
    stc_psd, sensor_psd = mne.minimum_norm.compute_source_psd(
        raws[kind], inverse_operator, lambda2=lambda2,
        n_fft=n_fft, dB=False, return_sensor=True, verbose=True)
    topo_norm = sensor_psd.data.sum(axis=1, keepdims=True)
    stc_norm = stc_psd.sum()  # same operation on MNE object, sum across freqs
    # Normalize each source point by the total power across freqs
    for band, limits in freq_bands.items():
        data = sensor_psd.copy().crop(*limits).data.sum(axis=1, keepdims=True)
        topos[kind][band] = mne.EvokedArray(
            100 * data / topo_norm, sensor_psd.info)
        stcs[kind][band] = \
            100 * stc_psd.copy().crop(*limits).sum() / stc_norm.data
    del inverse_operator
del fwd, raws, raw_erms
def plot_band(kind, band):
    """Plot activity within a frequency band on the subject's brain.

    Parameters: ``kind`` selects the recording ('vv' or 'opm') and ``band``
    a key of ``freq_bands``.  Returns ``(fig, brain)``; note that ``fig``
    is NOT created here -- it is the module-level figure left over from
    the alignment loop above, so the first return value is only meaningful
    in this script's execution order.
    """
    # Title embeds the band limits, e.g. "VectorView alpha\n(8-12 Hz)".
    title = "%s %s\n(%d-%d Hz)" % ((titles[kind], band,) + freq_bands[band])
    topos[kind][band].plot_topomap(
        times=0., scalings=1., cbar_fmt='%0.1f', vmin=0, cmap='inferno',
        time_format=title)
    brain = stcs[kind][band].plot(
        subject=subject, subjects_dir=subjects_dir, views='cau', hemi='both',
        time_label=title, title=title, colormap='inferno',
        clim=dict(kind='percent', lims=(70, 85, 99)), smoothing_steps=10)
    brain.show_view(dict(azimuth=0, elevation=0), roll=0)
    return fig, brain
fig_theta, brain_theta = plot_band('vv', 'theta')
fig_alpha, brain_alpha = plot_band('vv', 'alpha')
fig_beta, brain_beta = plot_band('vv', 'beta')
fig_beta_opm, brain_beta_opm = plot_band('opm', 'beta')
fig_gamma, brain_gamma = plot_band('vv', 'gamma')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data, resample. We will store the raw objects in dicts with entries
Step2: Do some minimal artifact rejection just for VectorView data
Step3: Explore data
Step4: Alignment and forward
Step5: Compute and apply inverse to PSD estimated using multitaper + Welch.
Step7: Now we can make some plots of each frequency band. Note that the OPM head
Step8: Alpha
Step9: Beta
Step10: Gamma
|
10,266
|
<ASSISTANT_TASK:>
Python Code:
def total_value(P, m, r, n):
    """Total value of a retirement portfolio given periodic payments.

    Implements the future-value-of-annuity formula::

        A = P / (r / m) * ((1 + r / m)**(m * n) - 1)

    :Input:
     - *P* (float) - Payment amount per compounding period
     - *m* (int) - number of compounding periods per year
     - *r* (float) - annual interest rate
     - *n* (float) - number of years to retirement
    :Returns:
     (float) - total value of portfolio
    """
    return P / (r / float(m)) * ((1.0 + r / float(m)) ** (float(m) * n)
                                 - 1.0)
P = 1500.0
m = 12
n = 20.0
r = numpy.linspace(0.05, 0.1, 100)
goal = 1e6
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(r, total_value(P, m, r, n))
axes.plot(r, numpy.ones(r.shape) * goal, 'r--')
axes.set_xlabel("r (interest rate)")
axes.set_ylabel("A (total value)")
axes.set_title("When can I retire?")
axes.ticklabel_format(axis='y', style='sci', scilimits=(-1,1))
plt.show()
def g(P, m, r, n, A):
    """Fixed-point form ``g(r)`` of the retirement problem.

    Implements::

        g(r) = (P * m / A) * ((1 + r / m)**(m * n) - 1)

    so that a solution of ``g(r) = r`` matches the target value ``A``.

    :Input:
     - *P* (float) - Payment amount per compounding period
     - *m* (int) - number of compounding periods per year
     - *r* (float) - annual interest rate
     - *n* (float) - number of years to retirement
     - *A* (float) - total value after n years
    :Returns:
     (float) - value of g(r)
    """
    return P * m / A * ((1.0 + r / float(m)) ** (float(m) * n)
                        - 1.0)
P = 1500.0
m = 12
n = 20.0
r = numpy.linspace(0.00, 0.1, 100)
goal = 1e6
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(r, g(P, m, r, n, goal))
axes.plot(r, r, 'r--')
axes.set_xlabel("r (interest rate)")
axes.set_ylabel("$g(r)$")
axes.set_title("When can I retire?")
axes.set_ylim([0, 0.12])
axes.ticklabel_format(axis='y', style='sci', scilimits=(-1,1))
plt.show()
r = 0.09
for steps in xrange(10):
print "r = ", r
print "Residual = ", g(P, m, r, n, goal) - r
r = g(P, m, r, n, goal)
print
x = numpy.linspace(0.2, 1.0, 100)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, numpy.exp(-x), 'r')
axes.plot(x, x, 'b')
axes.set_xlabel("x")
axes.set_ylabel("f(x)")
x = 0.4
for steps in xrange(7):
print "x = ", x
print "Residual = ", numpy.exp(-x) - x
x = numpy.exp(-x)
print
axes.plot(x, numpy.exp(-x),'o',)
plt.show()
x = numpy.linspace(0.1, 1.0, 100)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, -numpy.log(x), 'r')
axes.plot(x, x, 'b')
axes.set_xlabel("x")
axes.set_ylabel("f(x)")
axes.set_ylim([0.0, 1.5])
x = 0.5
for steps in xrange(3):
print "x = ", x
print "Residual = ", numpy.log(x) + x
x = -numpy.log(x)
print
axes.plot(x, -numpy.log(x),'o',)
plt.show()
x = numpy.linspace(0.0, 1.0, 100)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, numpy.exp(-x), 'r')
axes.plot(x, x, 'b')
axes.set_xlabel("x")
axes.set_ylabel("f(x)")
x = numpy.linspace(0.4, 0.8, 100)
axes.plot(numpy.ones(x.shape) * 0.4, numpy.exp(-x),'--k')
axes.plot(x, numpy.ones(x.shape) * numpy.exp(-x[-1]), '--k')
axes.plot(numpy.ones(x.shape) * 0.8, numpy.exp(-x),'--k')
axes.plot(x, numpy.ones(x.shape) * numpy.exp(-x[0]), '--k')
plt.show()
x = numpy.linspace(0.1, 1.0, 100)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, -numpy.log(x), 'r')
axes.plot(x, x, 'b')
axes.set_xlabel("x")
axes.set_ylabel("f(x)")
axes.set_ylim([0.0, 1.0])
x = numpy.linspace(0.4, 0.8, 100)
axes.plot(numpy.ones(x.shape) * 0.4, -numpy.log(x),'--k')
axes.plot(x, numpy.ones(x.shape) * -numpy.log(x[-1]), '--k')
axes.plot(numpy.ones(x.shape) * 0.8, -numpy.log(x),'--k')
axes.plot(x, numpy.ones(x.shape) * -numpy.log(x[0]), '--k')
plt.show()
x = numpy.linspace(0.4, 0.8, 100)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, -numpy.exp(-x), 'r')
axes.set_xlabel("x")
axes.set_ylabel("f(x)")
plt.show()
import sympy
m, P, A, r, n = sympy.symbols('m, P, A, r, n')
(m * P / A * ((1 + r / m)**(m * n) - 1)).diff(r)
P = 1500.0
m = 12
n = 20.0
A = 1e6
r = numpy.linspace(0.05, 0.1, 100)
f = lambda r, A, m, P, n: A - m * P / r * ((1.0 + r / m)**(m * n) - 1.0)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(r, f(r, A, m, P, n), 'b')
axes.plot(r, numpy.zeros(r.shape),'r--')
axes.set_xlabel("r (%)")
axes.set_ylabel("f(r)")
axes.ticklabel_format(axis='y', style='sci', scilimits=(-1,1))
a = 0.075
b = 0.095
axes.plot(a, f(a, A, m, P, n), 'ko')
axes.plot([a, a], [0.0, f(a, A, m, P, n)], 'k--')
axes.plot(b, f(b, A, m, P, n), 'ko')
axes.plot([b, b], [f(b, A, m, P, n), 0.0], 'k--')
plt.show()
P = 1500.0
m = 12
n = 20.0
A = 1e6
r = numpy.linspace(0.05, 0.11, 100)
f = lambda r, A=A, m=m, P=P, n=n: A - m * P / r * ((1.0 + r / m)**(m * n) - 1.0)
# Initialize bracket
a = 0.07
b = 0.10
# Setup figure to plot convergence
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(r, f(r, A, m, P, n), 'b')
axes.plot(r, numpy.zeros(r.shape),'r--')
axes.set_xlabel("r (%)")
axes.set_ylabel("f(r)")
# axes.set_xlim([0.085, 0.091])
axes.ticklabel_format(axis='y', style='sci', scilimits=(-1,1))
axes.plot(a, f(a, A, m, P, n), 'ko')
axes.plot([a, a], [0.0, f(a, A, m, P, n)], 'k--')
axes.plot(b, f(b, A, m, P, n), 'ko')
axes.plot([b, b], [f(b, A, m, P, n), 0.0], 'k--')
# Algorithm parameters
TOLERANCE = 1e-4
MAX_STEPS = 100
# Initialize loop
f_a = f(a)
f_b = f(b)
delta_x = b - a
# Loop until we reach the TOLERANCE or we take MAX_STEPS
for step in xrange(MAX_STEPS):
c = a + delta_x / 2.0
f_c = f(c)
if numpy.sign(f_a) != numpy.sign(f_c):
b = c
f_b = f_c
else:
a = c
f_a = f_c
delta_x = b - a
# Plot iteration
axes.text(c, f(c), str(step))
# Check tolerance - Could also check the size of delta_x
if numpy.abs(f_c) < TOLERANCE:
break
if step == MAX_STEPS:
print "Reached maximum number of steps!"
else:
print "Success!"
print " x* = %s" % c
print " f(x*) = %s" % f(c)
print " number of steps = %s" % step
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Root Finding and Optimization
Step3: Fixed Point Iteration
Step4: Guess at $r_0$ and check to see what direction we need to go...
Step5: Example 2
Step6: Example 3
Step7: These are equivalent problems! Something is awry...
Step8: Additionally, suppose $g'(x)$ is defined for $x \in [a,b]$ and $\exists K < 1$ s.t. $|g'(x)| \leq K < 1 ~~~ \forall ~~~ x \in (a,b)$, then $g$ has a unique fixed point $P \in [a,b]$
Step9: Theorem 2
Step10: Better ways for root-finding/optimization
Step11: Bisection Algorithm
|
10,267
|
<ASSISTANT_TASK:>
Python Code:
# Oversampling factor: we would like to see spectral windows
# and periodograms in more detail than just at the Fourier frequencies
oversampling = 10
truefreq = 0.2284271247
# Time sampling (a):
ts = np.linspace(start = 1, stop = 90, num = 90)
df1 = pd.DataFrame({'time': ts,
'cst': np.ones(90),
'sin1': np.sin(2 * math.pi * truefreq * ts),
'sin2': np.sin(2 * math.pi * (71./90.) * ts)})
F_freqs1 = np.linspace(start = 0, stop = 90, num = 91) / 90
freqs1 = np.linspace(start = 0, stop = 2*90, num = oversampling*2*90+1) / 90
# Time sampling (b):
ts = np.linspace(start = 0.25, stop = 90, num = 4*90)
df2 = pd.DataFrame({'time': ts,
'cst': np.ones(len(ts)),
'sin1': np.sin(2 * math.pi * truefreq * ts),
'sin2': np.sin(2 * math.pi * (71./90.) * ts)})
F_freqs2 = np.linspace(start = 0, stop = 4*90, num = 4*90+1) / 90
freqs2 = np.linspace(start = 0, stop = 2*4*90, num = oversampling*2*4*90+1) / 90
# Time sampling (c):
ts = np.linspace(start = 1, stop = 4*90, num = 4*90)
df3 = pd.DataFrame({'time': ts,
'cst': np.ones(len(ts)),
'sin1': np.sin(2 * math.pi * truefreq * ts),
'sin2': np.sin(2 * math.pi * (71./90.) * ts)})
F_freqs3 = np.linspace(start = 0, stop = 4*90, num = 4*90+1) / (4*90)
freqs3 = np.linspace(start = 0, stop = 2*4*90, num = oversampling*2*4*90+1) / (4*90)
# Define the Deeming function (fft gives only values at the Fourier frequencies,
# and is applicable only for even sampling.)
def deemingPSD_fun(times, obs, freqs):
    """Deeming power spectral density of ``obs`` sampled at ``times``.

    Evaluates, for every trial frequency in ``freqs``, the squared modulus
    of the discrete Fourier sum normalised by the number of observations
    squared.  Unlike the FFT, this works for arbitrary (uneven) sampling.
    """
    n_obs = len(times)
    psd = np.ones(len(freqs))
    for idx, freq in enumerate(freqs):
        phase = 2.0 * math.pi * freq * times
        sin_sum = np.dot(obs, np.sin(phase))
        cos_sum = np.dot(obs, np.cos(phase))
        psd[idx] = (sin_sum ** 2 + cos_sum ** 2) / n_obs ** 2
    return psd
spw1 = deemingPSD_fun(times = df1['time'], obs = df1['cst'], freqs = freqs1)
psd11 = deemingPSD_fun(times = df1['time'], obs = df1['sin1'], freqs = freqs1)
psd12 = deemingPSD_fun(times = df1['time'], obs = df1['sin2'], freqs = freqs1)
spw2 = deemingPSD_fun(times = df2['time'], obs = df2['cst'], freqs = freqs2)
psd21 = deemingPSD_fun(times = df2['time'], obs = df2['sin1'], freqs = freqs2)
psd22 = deemingPSD_fun(times = df2['time'], obs = df2['sin2'], freqs = freqs2)
spw3 = deemingPSD_fun(times = df3['time'], obs = df3['cst'], freqs = freqs3)
psd31 = deemingPSD_fun(times = df3['time'], obs = df3['sin1'], freqs = freqs3)
psd32 = deemingPSD_fun(times = df3['time'], obs = df3['sin2'], freqs = freqs3)
plt.rcParams["figure.figsize"] = [12.0, 9.0]
fig = plt.figure(1)
plt.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95, wspace=None, hspace=0.35)
ax1 = fig.add_subplot(311)
plt.plot(freqs1, spw1, 'k', alpha=1)
plt.title('90 days, sampling 1/d')
plt.xlabel('Frequency [1/d]')
plt.ylabel('Sp.win.')
plt.margins(.01, .05)
#plt.vlines(F_freqs1[F_freqs1 < 0.1], ymin = 0, ymax = 1, color='r', linestyle='solid')
#plt.xlim(0., .1)
ax2 = fig.add_subplot(312)
plt.plot(freqs2, spw2, 'k', alpha=1)
plt.title('90 days, sampling 4/d')
plt.xlabel('Frequency [1/d]')
plt.ylabel('Sp.win.')
plt.margins(.01, .05)
#plt.vlines(F_freqs2[F_freqs2 < 0.1], ymin = 0, ymax = 1, color='r', linestyle='solid')
#plt.xlim(0., .1)
ax3 = fig.add_subplot(313)
plt.plot(freqs3, spw3, 'k', alpha=1)
plt.title('360 days, sampling 1/d')
plt.xlabel('Frequency [1/d]')
plt.ylabel('Sp.win.')
plt.margins(.01, .05)
#plt.vlines(F_freqs3[F_freqs3 < 0.1], ymin = 0, ymax = 1, color='r', linestyle='solid')
#plt.xlim(0., .1)
plt.show()
fig = plt.figure(1)
plt.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95, wspace=None, hspace=0.35)
ax1 = fig.add_subplot(311)
plt.plot(freqs1, psd12, 'k', alpha=1)
plt.title('90 days, sampling 1/d')
plt.xlabel('Frequency [1/d]')
plt.ylabel('PSD')
plt.margins(0.01, 0.05)
#plt.vlines(F_freqs1, ymin = 0, ymax = 1, color='r', linestyle='solid')
#plt.xlim(0.5, 1)
plt.axvline(F_freqs1[71], ymin = 0, ymax = 1, color='c', linestyle='dashed')
ax2 = fig.add_subplot(312)
plt.plot(freqs2, psd22, 'k', alpha=1)
plt.title('90 days, sampling 4/d')
plt.xlabel('Frequency [1/d]')
plt.ylabel('PSD')
plt.margins(0.01, 0.05)
#plt.vlines(F_freqs2, ymin = 0, ymax = 1, color='r', linestyle='solid')
#plt.xlim(0.5, 1)
plt.axvline(F_freqs2[71], ymin = 0, ymax = 1, color='c', linestyle='dashed')
ax3 = fig.add_subplot(313)
plt.plot(freqs3, psd32, 'k', alpha=1)
plt.title('360 days, sampling 1/d')
plt.xlabel('Frequency [1/d]')
plt.ylabel('PSD')
plt.margins(0.01, 0.05)
#plt.vlines(F_freqs3, ymin = 0, ymax = 1, color='r', linestyle='solid')
#plt.xlim(0.5, 1)
plt.axvline(F_freqs3[284], ymin = 0, ymax = 1, color='c', linestyle='dashed')
plt.show()
drop_half_ind = (np.arange(360) % 4 < 2)
half_df = df3[drop_half_ind]
half_spw3 = deemingPSD_fun(times = half_df['time'], obs = half_df['cst'], freqs = freqs3)
half_psd31 = deemingPSD_fun(times = half_df['time'], obs = half_df['sin1'], freqs = freqs3)
half_psd32 = deemingPSD_fun(times = half_df['time'], obs = half_df['sin2'], freqs = freqs3)
fig = plt.figure(1)
plt.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95, wspace=None, hspace=0.35)
ax1 = fig.add_subplot(311)
plt.plot(freqs3, half_spw3, 'k', alpha=1)
plt.title('90 days, sampling 4/d with 50% missing data \n Spectral window')
plt.xlabel('Frequency [1/d]')
plt.ylabel('Spectral window')
plt.margins(0.01, 0.05)
#plt.vlines(F_freqs3, ymin = 0, ymax = 1, color='r', linestyle='solid')
#plt.xlim(0.95,1.05)
ax2 = fig.add_subplot(312)
plt.plot(freqs3, half_psd32, 'k', alpha=1)
plt.title('Sine at Fourier frequency')
plt.xlabel('Frequency [1/d]')
plt.ylabel('PSD')
plt.margins(0.01, 0.05)
#plt.vlines(F_freqs2, ymin = 0, ymax = 1, color='r', linestyle='solid')
#plt.xlim(71./90.-.05, 71./90.+.05)
#plt.ylim(0.,0.3)
plt.axvline(71./90., ymin = 0, ymax = 1, color='c', linestyle='dashed')
ax3 = fig.add_subplot(313)
plt.plot(freqs3, half_psd31, 'k', alpha=1)
plt.title('Sine, F = 0.2284')
plt.xlabel('Frequency [1/d]')
plt.ylabel('PSD')
plt.margins(0.01, 0.05)
#plt.vlines(F_freqs3, ymin = 0, ymax = 1, color='r', linestyle='solid')
#plt.xlim(truefreq-.05, truefreq+.05)
#plt.ylim(0.,0.3)
plt.axvline(truefreq, ymin = 0, ymax = 1, color='c', linestyle='dashed')
plt.show()
rnd_df = half_df.sample(frac = 0.5).sort_values(axis = 0, by = 'time')
rnd_df['time'] = rnd_df['time'] + np.random.uniform(-0.1,0.1, size = 90)
rnd_spw2 = deemingPSD_fun(times = rnd_df['time'], obs = rnd_df['cst'], freqs = freqs2)
rnd_psd21 = deemingPSD_fun(times = rnd_df['time'], obs = rnd_df['sin1'], freqs = freqs2)
rnd_psd22 = deemingPSD_fun(times = rnd_df['time'], obs = rnd_df['sin2'], freqs = freqs2)
from astroML.time_series import lomb_scargle
cep1 = pd.read_csv("./data/OGLE-LMC-CEP-0727.csv")
ecl1 = pd.read_csv("./data/OGLE-SMC-ECL-0322.csv")
ecl2 = pd.read_csv("./data/OGLE-SMC-ECL-0124.csv")
qso = pd.read_csv("./data/q2803474.csv")
## Periods:
p_cep1 = 14.4891397
p_ecl1 = 71.6113338
p_ecl2 = 0.3270255
p_qso = 10**(-0.00138752348664116)
## The resolution computed from the timespan of the observations:
def fgrid_fun(times, ofac, fmax):
    """Build an oversampled frequency grid up to ``fmax``.

    The base frequency resolution is 1 / (timespan * ofac); the grid runs
    from one resolution step up to ``fmax``.

    :Input:
     - *times* (array-like with .min()/.max()) - observation times
     - *ofac* (int) - oversampling factor
     - *fmax* (float) - maximum frequency of the grid
    :Returns:
     (ndarray) - frequency grid
    """
    resolution = 1 / (times.max() - times.min()) / ofac
    # np.floor returns a float; np.linspace requires an integer ``num``
    # on modern NumPy, so cast explicitly.
    n_freqs = int(np.floor(fmax / resolution))
    return np.linspace(resolution, fmax, n_freqs)
# The frequency grids for each object
fgrid_cep1 = fgrid_fun(cep1['time'], ofac = 10, fmax = 3)
fgrid_ecl1 = fgrid_fun(ecl1['time'], ofac = 10, fmax = 3)
fgrid_ecl2 = fgrid_fun(ecl2['time'], ofac = 10, fmax = 10)
fgrid_qso = fgrid_fun(qso['time'], ofac = 10, fmax = 6)
pgram_cep1 = lomb_scargle(t = cep1['time'], y = cep1['mag'], dy = cep1['mag.error'], omega = 2*np.pi*fgrid_cep1, generalized = True)
pgram_ecl1 = lomb_scargle(t = ecl1['time'], y = ecl1['mag'], dy = ecl1['mag.error'], omega = 2*np.pi*fgrid_ecl1, generalized = True)
pgram_ecl2 = lomb_scargle(t = ecl2['time'], y = ecl2['mag'], dy = ecl2['mag.error'], omega = 2*np.pi*fgrid_ecl2, generalized = True)
pgram_qso = lomb_scargle(t = qso['time'], y = qso['mag'], dy = qso['mag.error'], omega = 2*np.pi*fgrid_qso, generalized = True)
plt.rcParams["figure.figsize"] = [16., 8.]
fig = plt.figure(1)
plt.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95, wspace=None, hspace=0.35)
fig.add_subplot(221)
plt.plot(fgrid_cep1, pgram_cep1, 'k', alpha=1)
plt.title('Cepheid')
plt.xlabel('Frequency [1/d]')
plt.ylabel('Periodogram')
plt.margins(0.01, 0.05)
plt.axvline(1/p_cep1, ymin = 0, ymax = 0.1, color='c', lw = 2)
fig.add_subplot(222)
plt.plot(fgrid_ecl1, pgram_ecl1, 'k', alpha=1)
plt.title('Detached(?) binary')
plt.xlabel('Frequency [1/d]')
plt.ylabel('Periodogram')
plt.margins(0.01, 0.05)
plt.axvline(1/p_ecl1, ymin = 0, ymax = 0.1, color='c', lw = 2)
fig.add_subplot(223)
plt.plot(fgrid_ecl2, pgram_ecl2, 'k', alpha=1)
plt.title('Contact binary')
plt.xlabel('Frequency [1/d]')
plt.ylabel('Periodogram')
plt.margins(0.01, 0.05)
plt.axvline(1/p_ecl2, ymin = 0, ymax = 0.1, color='c', lw = 2)
fig.add_subplot(224)
plt.plot(fgrid_qso, pgram_qso, 'k', alpha=1)
plt.title('Quasar candidate')
plt.xlabel('Frequency [1/d]')
plt.ylabel('Periodogram')
plt.margins(0.01, 0.05)
plt.axvline(1/p_qso, ymin = 0, ymax = 0.1, color='c', lw = 2)
plt.show()
# Compute the period corresponding to the maximum of the periodogram:
p_est_ecl1 = 1 / fgrid_ecl1[pgram_ecl1 == pgram_ecl1.max()]
p_est_ecl2 = 1 / fgrid_ecl2[pgram_ecl2 == pgram_ecl2.max()]
# Check that it is indeed roughly the half of the catalog period:
print(p_est_ecl1 *2)
print(p_ecl1)
# Compute the phases and add them as column to the dataframes:
ecl1['phase2'] = (ecl1['time'] % p_est_ecl1) / p_est_ecl1
ecl2['phase2'] = (ecl2['time'] % p_est_ecl2) / p_est_ecl2
# plot:
plt.rcParams["figure.figsize"] = [12., 8.]
fig = plt.figure()
plt.subplots_adjust(left=0.05, bottom=0.1, right=0.95, top=0.95, wspace=None, hspace=0.35)
ax = fig.add_subplot(221)
plt.gca().invert_yaxis()
ax.errorbar(ecl1['phase2'], ecl1['mag'], ecl1['mag.error'], fmt='.k', ecolor='gray')
plt.title("ecl1, GLS period")
plt.xlabel('Phase')
plt.ylabel('Magnitude')
ax = fig.add_subplot(222)
plt.gca().invert_yaxis()
ax.errorbar(ecl1['phase'], ecl1['mag'], ecl1['mag.error'], fmt='.k', ecolor='gray')
plt.title("ecl1, catalog period")
plt.xlabel('Phase')
plt.ylabel('Magnitude')
ax = fig.add_subplot(223)
plt.gca().invert_yaxis()
ax.errorbar(ecl2['phase2'], ecl2['mag'], ecl2['mag.error'], fmt='.k', ecolor='gray')
plt.title("ecl2, GLS period")
plt.xlabel('Phase')
plt.ylabel('Magnitude')
ax = fig.add_subplot(224)
plt.gca().invert_yaxis()
ax.errorbar(ecl2['phase'], ecl2['mag'], ecl2['mag.error'], fmt='.k', ecolor='gray')
plt.title("ecl2, catalog period")
plt.xlabel('Phase')
plt.ylabel('Magnitude')
plt.show()
from astroML.time_series import multiterm_periodogram
fgrid_short_ecl1 = fgrid_ecl1[fgrid_ecl1 < 4.]
fgrid_short_ecl2 = fgrid_ecl2[(fgrid_ecl2 < 8.)]
pgram6_ecl1 = multiterm_periodogram(t = ecl1['time'], y = ecl1['mag'], dy = ecl1['mag.error'], omega = 2*np.pi*fgrid_short_ecl1, n_terms = 6)
pgram6_ecl2 = multiterm_periodogram(t = ecl2['time'], y = ecl2['mag'], dy = ecl2['mag.error'], omega = 2*np.pi*fgrid_short_ecl2, n_terms = 6)
p_est6_ecl1 = 1 / fgrid_short_ecl1[pgram6_ecl1 == pgram6_ecl1.max()]
print(p_est6_ecl1)
print(p_est_ecl1)
plt.rcParams["figure.figsize"] = [12., 8.]
fig = plt.figure(1)
plt.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95, wspace=None, hspace=0.35)
fig.add_subplot(211)
plt.plot(fgrid_ecl1, pgram_ecl1, 'k', alpha=1, label = "1 term")
plt.plot(fgrid_short_ecl1, pgram6_ecl1, 'r', alpha=1, linestyle = "dashed", label = "6 terms")
plt.title('Detached(?) binary')
plt.xlabel('Frequency [1/d]')
plt.ylabel('Periodogram')
plt.margins(0.01, 0.05)
plt.xlim(-0.1,4.)
plt.axvline(1/p_ecl1, ymin = 0, ymax = 0.1, color='c', lw = 2)
plt.legend(loc = "best")
fig.add_subplot(212)
plt.plot(fgrid_ecl2, pgram_ecl2, 'k', alpha=1, label = "1 term")
plt.plot(fgrid_short_ecl2, pgram6_ecl2, 'r', alpha=1, linestyle = "dashed", label = "6 terms")
plt.title('Contact binary')
plt.xlabel('Frequency [1/d]')
plt.ylabel('Periodogram')
plt.margins(0.01, 0.05)
plt.xlim(-0.1,8.)
plt.axvline(1/p_ecl2, ymin = 0, ymax = 0.1, color='c', lw = 2)
plt.legend(loc = "best")
plt.show()
import statsmodels.formula.api as smf
cep2 = pd.read_csv("./data/OGLE-SMC-CEP-1211.csv")
# Catalog period of the star in days:
p_cep2 = 9.5311467
# We'll fit models up to 10 harmonics with the catalog periodicity (normally, the
# found period should be used), and decide by the BIC which is best fitting. The harmonic
# models in the form a + sum_k (b_k * sin( omega * t)) + sum_k (c_k * sin( omega * t))
# are linear in their parameters, so we use linear least squares.
# Create a dataframe with all the necessary variables for the linear least squares fit:
for jj in np.arange(10):
cep2['s'+str(jj+1)] = np.sin(2*np.pi*cep2['time']*(jj+1)/p_cep2)
cep2['c'+str(jj+1)] = np.cos(2*np.pi*cep2['time']*(jj+1)/p_cep2)
bic_cep2 = np.zeros(10)
for jj in np.arange(10):
# We first create the formula string that the least squares fit takes as input
# (check the string that is created eg. by adding print formula_tmp):
formula_tmp = 'mag ~ ' + '+'.join(cep2.columns[4:(2*(jj+1)+4)])
# ... then fit the models and extract the BIC to decide which model is the best:
bic_cep2[jj] = smf.ols(formula_tmp, data = cep2).fit().bic
# Check the array of BICs: which is minimal?
bic_cep2
# Repeat the best model, and extract the residuals:
formula_tmp = 'mag ~ ' + '+'.join(cep2.columns[4:(2*(4+1)+4)])
cep2['resid'] = smf.ols(formula_tmp, data = cep2).fit().resid
# Perform period search:
fgrid_cep2 = fgrid_fun(cep2['time'], ofac = 10, fmax = 10)
pgram_resid_cep2 = lomb_scargle(t = cep2['time'], y = cep2['resid'], dy = cep2['mag.error'], omega = 2*np.pi*fgrid_cep2, generalized = True)
plt.rcParams["figure.figsize"] = [12., 4.]
fig = plt.figure(1)
plt.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95, wspace=None, hspace=None)
plt.plot(fgrid_cep2, pgram_resid_cep2, 'k', alpha=1)
plt.title('Cepheid 2')
plt.xlabel('Frequency [1/d]')
plt.ylabel('Periodogram')
plt.margins(0.01, 0.05)
plt.axvline(1/p_cep2, ymin = 0, ymax = 1, color='c', lw = 2, linestyle = "dashed")
plt.show()
print(cep2['time'].max())
print(cep2['time'].min())
centers_tmp = np.linspace(470, 4950, num = 449)
fgrid_short_cep2 = np.linspace(0.1, 0.11, 4000)
trsvd_cep2 = np.zeros([len(fgrid_short_cep2), len(centers_tmp)])
for ii in np.arange(0,len(centers_tmp)):
df = cep2[(cep2['time'] < centers_tmp[ii] + 365) & (cep2['time'] > centers_tmp[ii] - 365)]
trsvd_cep2[:,ii] = lomb_scargle(t = df['time'], y = df['mag'], dy = df['mag.error'], omega = 2*np.pi*fgrid_short_cep2, generalized = True)
# Find the candidate frequencies in each window (where the periodogram takes its maximum value):
bestfr_vs_time = np.zeros(len(centers_tmp))
for jj in np.arange(len(centers_tmp)):
bestfr_vs_time[jj] = fgrid_short_cep2[trsvd_cep2[:,jj] == trsvd_cep2[:,jj].max()]
# Plot the spectrogram, and add the constant catalog frequency and the best local frequencies from the sliding windows:
plt.rcParams["figure.figsize"] = [8., 8.]
fig = plt.figure()
plt.imshow(trsvd_cep2, origin = 'lower', aspect = 'auto',
extent = [centers_tmp[0], centers_tmp[-1], fgrid_short_cep2[0], fgrid_short_cep2[-1]])
plt.title('Cepheid 3')
plt.ylabel('Frequency [1/d]')
plt.xlabel('Time [d]')
plt.margins(0.01, 0.05)
plt.axhline(1/p_cep2, xmin = 0, xmax = 1, color='w')
plt.plot(cep2['time'], [0.1]*len(cep2['time']), '|', color='k')
plt.plot(centers_tmp, bestfr_vs_time, 'k')
plt.show()
from scipy import fftpack
from matplotlib.mlab import specgram
from astroML.fourier import wavelet_PSD
from scipy import ndimage
qpo = pd.read_csv("./data/qpo.csv")
# The values are photon counts in 2**(-11) s long intervals.
dt = 2**(-11)
# The times of the flux values thus:
times = np.arange(0, np.shape(qpo)[0]*dt, dt, dtype = float)
#qpo_slices = np.linspace(qpo['t'].min(), qpo['t'].max(), 400)
# We wish to compute the PSD in 8 s long windows. The time limits for selection:
qpo_slices = np.arange(0, 426*8, 8, dtype = float)
# The centers of the slices (for plotting):
center_times = np.arange(0, 425*8, 8, dtype = float)+4
# Spacing of the Fourier frequencies in a 8 s window:
df = 1. / 8
# the Fourier frequencies:
ffreqs = df * np.arange(16384)
psd_specgram, freqs_specgram, centers_specgram = specgram(qpo['flux'],
NFFT = 16384, Fs=2**11, noverlap=16384/2)
plt.rcParams["figure.figsize"] = [8., 8.]
fig = plt.figure()
plt.imshow(psd_specgram[20:,:], origin = 'lower', aspect = 'auto',
extent = [centers_specgram[0], centers_specgram[-1], freqs_specgram[20], freqs_specgram[-1]])
plt.title('Raw PSD')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [s]')
plt.margins(0.01, 0.05)
plt.show()
psd_specgram_smoo = ndimage.filters.gaussian_filter(psd_specgram, [8,8], mode='constant', cval = 0)
plt.rcParams["figure.figsize"] = [16., 8.]
fig = plt.figure()
plt.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95, wspace=None, hspace=0.35)
fig.add_subplot(121)
plt.imshow(psd_specgram[20:,:], origin = 'lower', aspect = 'auto',
extent = [centers_specgram[0], centers_specgram[-1], freqs_specgram[20], freqs_specgram[-1]])
plt.title('Raw PSD')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [s]')
plt.margins(0.01, 0.05)
fig.add_subplot(122)
plt.imshow(psd_specgram_smoo[40:,:], origin = 'lower', aspect = 'auto',
extent = [centers_specgram[0], centers_specgram[-1], freqs_specgram[40], freqs_specgram[-1]])
plt.title('Smoothed PSD')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [s]')
plt.margins(0.01, 0.05)
plt.show()
Q = 2000
freq_wavelet = np.arange(800,900, 0.25)
qpo_waveletpsd = wavelet_PSD(t = times[0:200000], h = qpo['flux'][0:200000],
f0 = freq_wavelet, Q = Q)
plt.rcParams["figure.figsize"] = [8., 8.]
fig = plt.figure(1)
plt.imshow(qpo_waveletpsd, origin = 'lower', aspect = 'auto',
extent = [times[0], times[200000], freq_wavelet[0], freq_wavelet[-1]])
plt.title('Wavelet PSD')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [s]')
plt.margins(0.01, 0.05)
plt.show()
from sklearn.gaussian_process import GaussianProcess
## Define the three kernel functions we'll use:
def exponential(x1, x2, h):
    """Absolute-exponential (Ornstein-Uhlenbeck) kernel with length scale ``h``."""
    return np.exp(-0.5 * np.abs(x1 - x2) / h)
def squared_exponential(x1, x2, h):
    """Squared-exponential (RBF / Gaussian) kernel with length scale ``h``."""
    return np.exp(-0.5 * (x1 - x2) ** 2 / h ** 2)
def quasi_periodic(x1, x2, h, l, nu):
    """Quasi-periodic kernel: squared-exponential envelope (length scale ``h``)
    times a periodic factor with frequency ``nu`` and periodic length scale ``l``."""
    return np.exp(-0.5 * (x1 - x2) ** 2 / h ** 2
                  - 0.5 * np.sin(2 * np.pi * nu * (x1 - x2)) ** 2 / l)
x = np.linspace(52610, 54625, 115)
h = 50.0
l = 0.1
nu = 0.1
mu = np.zeros(len(x))
Cse = squared_exponential(x, x[:, None], h)
Ce = exponential(x, x[:, None], h)
Cq = quasi_periodic(x, x[:, None], h = h, l = l, nu = nu)
qper_rdvar = np.random.multivariate_normal(mu, Cq, 3)
ex_rdvar = np.random.multivariate_normal(mu, Ce, 3)
sqex_rdvar = np.random.multivariate_normal(mu, Cse, 3)
plt.rcParams["figure.figsize"] = [5., 10.]
fig = plt.figure(1)
plt.subplots_adjust(left=0.07, bottom=0.05, right=0.95, top=0.95, wspace=None, hspace=0.35)
fig.add_subplot(311)
for ii in [0,1,2]:
plt.plot(x, ex_rdvar[ii], 'k', alpha=1)
plt.title('Exponential')
plt.xlabel('x')
plt.ylabel('GP draw')
plt.margins(0.01, 0.05)
fig.add_subplot(312)
for ii in [0,1,2]:
plt.plot(x, sqex_rdvar[ii], 'k', alpha=1)
plt.title('Squared exponential')
plt.xlabel('x')
plt.ylabel('GP draw')
plt.margins(0.01, 0.05)
#plt.xlim(-0.1,4.)
fig.add_subplot(313)
for ii in [0,1,2]:
plt.plot(x, qper_rdvar[ii], 'k', alpha=1)
plt.title('Quasi-periodic')
plt.xlabel('x')
plt.ylabel('GP draw')
plt.margins(0.01, 0.05)
plt.show()
qso = pd.read_csv("./data/q2803474.csv")
qso_gp_sqex1 = GaussianProcess(corr='squared_exponential', theta0=0.5, thetaL = 0.001, thetaU = 100)
qso_gp_ex1 = GaussianProcess(corr='absolute_exponential', theta0=0.5, thetaL = 0.001, thetaU = 100)
qso_gp_sqex1.fit(qso['time'].reshape(-1,1), qso['mag'].reshape(-1,1))
m_var = np.mean(qso['mag.error'].reshape(-1,1)**2)
qso_gp_sqex1 = GaussianProcess(corr='squared_exponential', theta0=100, thetaL = 1, thetaU = 1000,
nugget = m_var)
qso_gp_ex1 = GaussianProcess(corr='absolute_exponential', theta0=100, thetaL = 0.001, thetaU = 1000,
nugget = m_var)
# Fit the model:
qso_gp_sqex1.fit(qso['time'].reshape(-1,1), qso['mag'].reshape(-1,1))
qso_gp_ex1.fit(qso['time'].reshape(-1,1), qso['mag'].reshape(-1,1))
print("Decay parameter in the squared exp. model:", qso_gp_sqex1.theta_)
print("Decay parameter in the absolute exp. model:", qso_gp_ex1.theta_)
print("Variance in the squared exp. model:", qso_gp_sqex1.sigma2)
print("Variance in the absolute exp. model:", qso_gp_ex1.sigma2)
print("R2 in the squared exp. model:", qso_gp_sqex1.score(qso['time'].reshape(-1,1), qso['mag'].reshape(-1,1)))
print("R2 in the absolute exp. model:", qso_gp_ex1.score(qso['time'].reshape(-1,1), qso['mag'].reshape(-1,1)))
tt = np.linspace(qso['time'].min(), qso['time'].max(), 500)
qso_gp_sqex1_pred, qso_gp_sqex1_mse = qso_gp_sqex1.predict(tt.reshape(-1,1), eval_MSE=True)
qso_gp_ex1_pred, qso_gp_ex1_mse = qso_gp_ex1.predict(tt.reshape(-1,1), eval_MSE=True)
plt.rcParams["figure.figsize"] = [16., 5.]
fig = plt.figure(1)
plt.subplots_adjust(left=0.07, bottom=0.05, right=0.95, top=0.95, wspace=None, hspace=0.35)
ax = fig.add_subplot(121)
plt.gca().invert_yaxis()
ax.plot(tt, qso_gp_sqex1_pred, '-', color='gray')
ax.fill_between(tt, qso_gp_sqex1_pred.reshape(len(tt)) - 2 * np.sqrt(qso_gp_sqex1_mse),
qso_gp_sqex1_pred.reshape(len(tt)) + 2 * np.sqrt(qso_gp_sqex1_mse),
color='gray', alpha=0.3)
ax.errorbar(qso['time'], qso['mag'], qso['mag.error'], fmt='.k', ms=6)
ax.set_xlabel('Time [d]')
ax.set_xlabel('Magnitude')
plt.title('Squared exponential model')
ax = fig.add_subplot(122)
plt.gca().invert_yaxis()
ax.plot(tt, qso_gp_ex1_pred, '-', color='gray')
ax.fill_between(tt, qso_gp_ex1_pred.reshape(len(tt)) - 2 * np.sqrt(qso_gp_ex1_mse),
qso_gp_ex1_pred.reshape(len(tt)) + 2 * np.sqrt(qso_gp_ex1_mse),
color='gray', alpha=0.3)
ax.errorbar(qso['time'], qso['mag'], qso['mag.error'], fmt='.k', ms=6)
ax.set_xlabel('Time [d]')
ax.set_xlabel('Magnitude')
plt.title('Absolute exponential model')
#plt.xlim(-0.1,4.)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualize first the three spectral windows. What are the differences between them? After inspecting it on the whole test frequency range, enlarge it
Step2: Consider now the sinusoid at the Fourier frequency. Again, enlarge it around its true frequency. What would be observed if computed only at the Fourier frequencies? Next replace ths sinusoid with the other one (stored in psd11, psd21, psd31) to see what could be seen at the Fourier frequencies.
Step3: TAKE-HOME MESSAGE
Step4: TAKE-HOME MESSAGE
Step5: Note the data does not contain any noise as yet
Step6: For the Cepheid, everything looks all right. For the quasar, the found frequency depended on the definition of the minimal frequency searched
Step7: 2.2 OPTIONAL
Step8: The multiterm periodogram found the right period in both cases. The reason is the presence of the even harmonics in the fitted light curve. While a sinusoid can have only one minimum within a period, harmonic 2 have double frequency (half the period), and therefore reproduce the two minima. Other period search methods, which do not fit sinusoids, can also perform better than the generalized Lomb-Scargle.
Step9: OPTIONAL
Step10: Ex. 3. TIME-RESOLVED ANALYSIS
Step11: The time-resolved sliding window periodogram shows well the period change of the Cepheid.
Step12: Plot the spectrogram (leaving out the high values of the red noise at low frequencies)
Step13: When omitting the low-frequency terms around f = 0 in the periodograms, the plot of the spectrogram would show just predominant noise.
Step14: The smoothing in frequency domain achieved that the periodograms are now consistent estimators of the spectral density of the true signal in the background. The smoothing in time-domain averaged (with Gaussian weights, so putting more weight on the central time) the close-by periodograms, emphasising those frequencies that are more persistently present in the spectrum. The QPO is now clearly emerging from the background.
Step15: EX. 4. GAUSSIAN PROCESSES
Step16: Ex. 4.2. CANDIDATE QUASAR FROM LINEAR
Step17: The above fit failed, because the given type of process could not produce light curves passing through each point. The observations are strongly grouped
Step18: We can take a look at various parameters of the model (the scale parameter, the residual scatter, and the R2, for example).
Step19: Finally, we can plot the model with its confidence band.
|
10,268
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
data = pd.read_table('spectrum_crab_hess_2006.txt',
comment='#', sep='\s*', engine='python')
data
def flux_ecpl(energy, flux1, gamma, energy_cut):
    """Exponential-cutoff power law: flux1 * E**(-gamma) * exp(-E / energy_cut).

    `energy` may be a scalar or numpy array; the result broadcasts accordingly.
    """
    power_law = flux1 * np.power(energy, -gamma)
    cutoff = np.exp(-energy / energy_cut)
    return power_law * cutoff
energy = np.logspace(-0.5, 1.6, 100)
flux = flux_ecpl(energy, flux1=3.76e-11, gamma=2.39, energy_cut=14.3)
plt.plot(energy, flux)
plt.errorbar(x=data['energy'],
y=data['flux'],
yerr=data['flux_err'],
fmt='.'
)
plt.loglog();
def chi2(flux1, gamma, energy_cut):
    """Chi-square of the cutoff power-law model against the module-level
    `data` table (expects columns 'energy', 'flux', 'flux_err')."""
    energy = data['energy']
    model_flux = flux_ecpl(energy, flux1, gamma, energy_cut)
    # Normalized residuals; their squared sum is the chi^2 statistic.
    residuals = (data['flux'] - model_flux) / data['flux_err']
    return np.sum(residuals ** 2)
# TODO: visualise the likelihood as a 1D profile or
# 2D contour to check that the implementation is OK
# before fitting. E.g. reproduce Fig 19 from the paper?
# Maybe talk about how chi2 differences relate to
# confidence levels here?
from iminuit import Minuit
# TODO
import emcee
# TODO
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The data
Step2: The model
Step3: Plot data and model
Step4: The likelihood
Step5: ML fit with Minuit
Step6: Analysis with emcee
|
10,269
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
cluster = tf.train.ClusterSpec({"local": ["localhost:2222", "localhost:2223"]})
server0 = tf.train.Server(cluster, job_name="local", task_index=0)
print(server0)
server1 = tf.train.Server(cluster, job_name="local", task_index=1)
print(server1)
import tensorflow as tf
n = 2
c1 = tf.Variable([])
c2 = tf.Variable([])
def matpow(M, n):
    """Chain tf.matmul calls recursively.

    NOTE(review): for n >= 0 this multiplies n+1 factors of M (i.e. returns
    M**(n+1), not M**n) — matpow(M, 0) is M, matpow(M, 1) is M @ M. Verify
    that callers intend this off-by-one convention.
    """
    if n >= 1:
        return tf.matmul(M, matpow(M, n - 1))
    return M
shape=[2500, 2500]
import datetime
with tf.device("/job:local/task:0/cpu:0"):
A = tf.random_normal(shape=shape)
c1 = matpow(A,n)
with tf.device("/job:local/task:1/cpu:0"):
B = tf.random_normal(shape=shape)
c2 = matpow(B,n)
with tf.Session("grpc://127.0.0.1:2222") as sess:
sum = c1 + c2
start_time = datetime.datetime.now()
print(sess.run(sum))
print("Execution time: "
+ str(datetime.datetime.now() - start_time))
with tf.device("/job:local/task:0/gpu:0"):
A = tf.random_normal(shape=shape)
c1 = matpow(A,n)
with tf.device("/job:local/task:1/cpu:0"):
B = tf.random_normal(shape=shape)
c2 = matpow(B,n)
with tf.Session("grpc://127.0.0.1:2222") as sess:
sum = c1 + c2
start_time = datetime.datetime.now()
print(sess.run(sum))
print("Execution time: "
+ str(datetime.datetime.now() - start_time))
with tf.device("/job:local/task:0/gpu:0"):
A = tf.random_normal(shape=shape)
c1 = matpow(A,n)
with tf.device("/job:local/task:1/gpu:0"):
B = tf.random_normal(shape=shape)
c2 = matpow(B,n)
with tf.Session("grpc://127.0.0.1:2222") as sess:
sum = c1 + c2
start_time = datetime.datetime.now()
print(sess.run(sum))
print("Execution time: "
+ str(datetime.datetime.now() - start_time))
with tf.device(tf.train.replica_device_setter(worker_device="/job:worker/task:0",
cluster=cluster)):
A = tf.random_normal(shape=shape)
c1 = matpow(A,n)
with tf.device(tf.train.replica_device_setter(worker_device="/job:worker/task:1",
cluster=cluster)):
B = tf.random_normal(shape=shape)
c2 = matpow(B,n)
with tf.Session("grpc://127.0.0.1:2222") as sess:
sum = c1 + c2
start_time = datetime.datetime.now()
print(sess.run(sum))
print("Multi node computation time: "
+ str(datetime.datetime.now() - start_time))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Start Server "Task 0" (localhost
Step2: Start Server "Task 1" (localhost
Step3: Define Compute-Heavy TensorFlow Graph
Step4: Define Shape
Step5: Assign Devices Manually
Step6: CPU and GPU
Step7: All GPU Devices
Step8: Auto-assign Device by TensorFlow (Round-Robin by Default)
|
10,270
|
<ASSISTANT_TASK:>
Python Code:
import time
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.cross_validation import cross_val_score
#from sklearn.model_selection import StratifiedShuffleSplit
#from sklearn.model_selection import cross_val_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import roc_curve, auc
from sklearn.utils import shuffle
from scipy import interp
%matplotlib inline
def remove_zero_columns(X, threshold=1e-20):
# convert zeros to nan, drop all nan columns, the replace leftover nan with zeros
X_non_zero_colum = X.replace(0, np.nan).dropna(how='all', axis=1).replace(np.nan, 0)
#.dropna(how='all', axis=0).replace(np.nan,0)
return X_non_zero_colum
def zero_fill_half_min(X, threshold=1e-20):
    """Replace (near-)zero entries of each column with half that column's
    smallest above-threshold value; non-zero entries are left untouched."""
    # Per-column half-minimum of the values that count as real signal.
    half_min = 0.5 * X.where(X > threshold).min(axis=0)
    # Fill amounts: half_min where the entry is below threshold, 0 elsewhere
    # (below-threshold entries keep their original value plus half_min,
    # matching the original's `X[X < threshold] + half_min` broadcast).
    increments = X.where(X < threshold).add(half_min).fillna(value=0)
    return X + increments
toy = pd.DataFrame([[1,2,3,0],
[0,0,0,0],
[0.5,1,0,0]], dtype=float)
toy_no_zeros = remove_zero_columns(toy)
toy_filled_zeros = zero_fill_half_min(toy_no_zeros)
print toy
print toy_no_zeros
print toy_filled_zeros
### Subdivide the data into a feature table
data_path = '/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/old_repo_revo_healthcare/'\
'data/MTBLS315/7-24-17_Malaria_pos_grouped_data_Craig_processed.csv'
## Import the data and remove extraneous columns
df = pd.read_csv(data_path)
print df.shape
print df.head()
# Make a new index of mz:rt
mz = df.loc[:,"mz"].astype('str')
rt = df.loc[:,"rt"].astype('str')
idx = mz+':'+rt
df.index = idx
df
# separate samples from xcms/camera things to make feature table
not_samples = ['mz', 'mzmin', 'mzmax', 'rt', 'rtmin', 'rtmax',
'isotopes', 'adduct']
samples_list = df.columns.difference(not_samples)
mz_rt_df = df[not_samples]
# convert to samples x features
X_df_raw = df[samples_list].T
# Remove zero-full columns and fill zeroes with 1/2 minimum values
X_df = remove_zero_columns(X_df_raw)
X_df_zero_filled = zero_fill_half_min(X_df)
print (X_df < 1e-20).sum().sum()
print "original shape: %s \n# zeros: %f\n" % (X_df_raw.shape, (X_df_raw < 1e-20).sum().sum())
print "zero-columns replaced? shape: %s \n# zeros: %f\n" % (X_df.shape,
(X_df < 1e-20).sum().sum())
print "zeros filled shape: %s \n#zeros: %f\n" % (X_df_zero_filled.shape,
(X_df_zero_filled < 1e-20).sum().sum())
# Convert to numpy matrix to play nicely with sklearn
X = X_df.as_matrix()
print X.shape
# Get mapping between sample name and assay names
path_sample_name_map = '/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revo_healthcare/data/raw/'\
'MTBLS315/metadata/a_UPLC_POS_nmfi_and_bsi_diagnosis.txt'
# Index is the sample name
sample_df = pd.read_csv(path_sample_name_map,
sep='\t', index_col=0)
sample_df = sample_df['MS Assay Name']
sample_df.shape
print sample_df.head(10)
# get mapping between sample name and sample class
path_sample_class_map = '/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revo_healthcare/data/raw/'\
'MTBLS315/metadata/s_NMFI and BSI diagnosis.txt'
class_df = pd.read_csv(path_sample_class_map,
sep='\t')
# Set index as sample name
class_df.set_index('Sample Name', inplace=True)
class_df = class_df['Factor Value[patient group]']
print class_df.head(10)
# convert all non-malarial classes into a single classes
# (collapse non-malarial febril illness and bacteremia together)
class_map_df = pd.concat([sample_df, class_df], axis=1)
class_map_df.rename(columns={'Factor Value[patient group]': 'class'}, inplace=True)
class_map_df
print class_map_df
# remove sample from y (labels) if not present in X (feature table)
print X_df.index
print class_map_df['MS Assay Name'].tolist()
for i in class_map_df['MS Assay Name']:
if i not in X_df.index:
print '\n\n\nFound the missing one!: ', i, '\n\n\n'
break
drop_idx = class_map_df[class_map_df['MS Assay Name'] == i].index
class_map_df = class_map_df.drop(drop_idx, axis=0)
print class_map_df
binary_class_map = class_map_df.replace(to_replace=['non-malarial febrile illness', 'bacterial bloodstream infection' ],
value='non-malarial fever')
binary_class_map
### Remove any rows from binary map that aren't in craig's actual feature table
# Remove any samples from y (classes) that aren't in feature table
idx = X_df.index.tolist()
print idx
new_idx = [i.replace('X','') for i in idx]
X_df.index = new_idx
print X_df.shape
print binary_class_map.shape
print binary_class_map
# convert classes to numbers
le = preprocessing.LabelEncoder()
le.fit(binary_class_map['class'])
y = le.transform(binary_class_map['class'])
print X.shape
print y.shape
def rf_violinplot(X, y, n_iter=25, test_size=0.3, random_state=1,
                  n_estimators=1000):
    """Violin plot of random-forest accuracy across stratified shuffle splits.

    Side effect only: draws a seaborn violin plot; nothing is returned.
    """
    splitter = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size,
                                      random_state=random_state)
    forest = RandomForestClassifier(n_estimators=n_estimators,
                                    random_state=random_state)
    accuracies = cross_val_score(forest, X, y, cv=splitter)
    sns.violinplot(accuracies, inner='stick')
rf_violinplot(X,y)
# TODO - Switch to using caret for this bs..?
# Do multi-fold cross validation for adaboost classifier
def adaboost_violinplot(X, y, n_iter=25, test_size=0.3, random_state=1,
                        n_estimators=200):
    """Violin plot of AdaBoost accuracy across stratified shuffle splits.

    Side effect only: draws a seaborn violin plot; nothing is returned.
    """
    splitter = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size,
                                      random_state=random_state)
    booster = AdaBoostClassifier(n_estimators=n_estimators,
                                 random_state=random_state)
    accuracies = cross_val_score(booster, X, y, cv=splitter)
    sns.violinplot(accuracies, inner='stick')
adaboost_violinplot(X,y)
# TODO PQN normalization, and log-transformation,
# and some feature selection (above certain threshold of intensity, use principal components), et
def pqn_normalize(X, integral_first=False, plot=False):
    '''
    Probabilistic Quotient Normalization (PQN) of a samples-x-features table.

    :param X: 2-D numpy array, rows = samples, columns = features
    :param integral_first: if True, scale each sample by its total intensity
        (integral normalization) before applying PQN
    :param plot: if True, show violin plots of per-sample quotient
        distributions (debugging aid; requires seaborn/matplotlib)
    :return: PQN-normalized copy of X
    '''
    if integral_first:
        # Scale each sample (row) by its summed intensity.
        sample_sums = np.sum(X, axis=1)
        X = (X / sample_sums[:, np.newaxis])
    # Reference spectrum: the median value of each feature across samples.
    # (Renamed from `mean_intensities` — it holds medians, not means.)
    median_intensities = np.median(X, axis=0)
    # Feature-wise quotients of every sample against the reference spectrum.
    X_quotients = (X / median_intensities[np.newaxis, :])
    if plot:
        # Debug view of each sample's quotient distribution.
        # NOTE(review): range starts at 1, so sample 0 is never plotted —
        # confirm whether that is intentional.
        for i in range(1, len(X_quotients[:, 1])):
            all_quotients = X_quotients[i, :]
            print(all_quotients.shape)
            sns.violinplot(all_quotients)
            plt.title("median val: %f\nMax val=%f"
                      % (np.median(all_quotients), np.max(all_quotients)))
            plt.xlim([-0.5, 5])
            plt.show()
    # Per-sample dilution factor: the median of that sample's quotients.
    sample_quotients = np.median(X_quotients, axis=1)
    # Divide each sample by its dilution factor.
    X_pqn = X / sample_quotients[:, np.newaxis]
    return X_pqn
# Make a fake sample, with 2 samples at 1x and 2x dilutions
X_toy = np.array([[1,1,1,],
[2,2,2],
[3,6,9],
[6,12,18]], dtype=float)
print X_toy
print X_toy.reshape(1, np.prod(X_toy.shape))
X_toy_pqn_int = pqn_normalize(X_toy, integral_first=True, plot=True)
print X_toy_pqn_int
print '\n\n\n'
X_toy_pqn = pqn_normalize(X_toy)
print X_toy_pqn
print X.shape
X_pqn = pqn_normalize(X)
print X_pqn
rf_violinplot(X_pqn, y)
# Do multi-fold cross validation for adaboost classifier
adaboost_violinplot(X_pqn, y)
X_pqn_nlog = np.log(X_pqn)
rf_violinplot(X_pqn_nlog, y)
adaboost_violinplot(X_pqn_nlog, y)
def roc_curve_cv(X, y, clf, cross_val,
                 path='/home/irockafe/Desktop/roc.pdf',
                 save=False, plot=True):
    """Build cross-validated ROC curves for `clf` on (X, y).

    Fits the classifier on each train split of `cross_val`, interpolates each
    fold's ROC onto a common 100-point FPR grid, and optionally plots the mean
    curve with a +/- 1 stdev band.

    Returns (tpr_list, auc_list, mean_fpr): per-fold interpolated TPR arrays,
    per-fold AUC values, and the shared FPR grid.

    NOTE(review): written for Python 2 (`print` statements) and assumes
    `cross_val` exposes `.n_iter` / `.test_size` (old-style sklearn
    cross-validation objects).
    """
    t1 = time.time()
    # collect vals for the ROC curves
    tpr_list = []
    mean_fpr = np.linspace(0,1,100)
    auc_list = []
    # Get the false-positive and true-positive rate
    for i, (train, test) in enumerate(cross_val):
        clf.fit(X[train], y[train])
        # Probability of the positive class (column 1) for ROC scoring.
        y_pred = clf.predict_proba(X[test])[:,1]
        # get fpr, tpr
        fpr, tpr, thresholds = roc_curve(y[test], y_pred)
        roc_auc = auc(fpr, tpr)
        #print 'AUC', roc_auc
        #sns.plt.plot(fpr, tpr, lw=10, alpha=0.6, label='ROC - AUC = %0.2f' % roc_auc,)
        #sns.plt.show()
        # Interpolate this fold's ROC onto the shared FPR grid so folds
        # can be averaged pointwise; pin the first point to 0.
        tpr_list.append(interp(mean_fpr, fpr, tpr))
        tpr_list[-1][0] = 0.0
        auc_list.append(roc_auc)
        if (i % 10 == 0):
            print '{perc}% done! {time}s elapsed'.format(perc=100*float(i)/cross_val.n_iter, time=(time.time() - t1))
    # get mean tpr and fpr
    mean_tpr = np.mean(tpr_list, axis=0)
    # make sure it ends up at 1.0
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    std_auc = np.std(auc_list)
    if plot:
        # plot mean auc
        plt.plot(mean_fpr, mean_tpr, label='Mean ROC - AUC = %0.2f $\pm$ %0.2f' % (mean_auc,
                                                                                   std_auc),
                 lw=5, color='b')
        # plot luck-line
        plt.plot([0,1], [0,1], linestyle = '--', lw=2, color='r',
                 label='Luck', alpha=0.5)
        # plot 1-std
        std_tpr = np.std(tpr_list, axis=0)
        tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
        tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
        plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.2,
                         label=r'$\pm$ 1 stdev')
        plt.xlim([-0.05, 1.05])
        plt.ylim([-0.05, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('ROC curve, {iters} iterations of {cv} cross validation'.format(
            iters=cross_val.n_iter, cv='{train}:{test}'.format(test=cross_val.test_size, train=(1-cross_val.test_size)))
                  )
        plt.legend(loc="lower right")
        if save:
            plt.savefig(path, format='pdf')
        plt.show()
    return tpr_list, auc_list, mean_fpr
rf_estimators = 1000
n_iter = 3
test_size = 0.3
random_state = 1
cross_val_rf = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size, random_state=random_state)
clf_rf = RandomForestClassifier(n_estimators=rf_estimators, random_state=random_state)
rf_graph_path = '''/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revolutionizing_healthcare/data/MTBLS315/\
isaac_feature_tables/uhplc_pos/rf_roc_{trees}trees_{cv}cviter.pdf'''.format(trees=rf_estimators, cv=n_iter)
print cross_val_rf.n_iter
print cross_val_rf.test_size
tpr_vals, auc_vals, mean_fpr = roc_curve_cv(X, y, clf_rf, cross_val_rf,
path=rf_graph_path, save=False)
# For adaboosted
n_iter = 3
test_size = 0.3
random_state = 1
adaboost_estimators = 200
adaboost_path = '''/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revolutionizing_healthcare/data/MTBLS315/\
isaac_feature_tables/uhplc_pos/adaboost_roc_{trees}trees_{cv}cviter.pdf'''.format(trees=adaboost_estimators,
cv=n_iter)
cross_val_adaboost = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size, random_state=random_state)
clf = AdaBoostClassifier(n_estimators=adaboost_estimators, random_state=random_state)
adaboost_tpr, adaboost_auc, adaboost_fpr = roc_curve_cv(X, y, clf, cross_val_adaboost,
path=adaboost_path)
print X.shape
# Make a null model AUC curve
def make_null_model(X, y, clf, cross_val, random_state=1, num_shuffles=5, plot=True):
    '''
    Runs the true model, then sanity-checks by:
    Shuffles class labels and then builds cross-validated ROC curves from them.
    Compares true AUC vs. shuffled auc by t-test (assumes normality of AUC curve)

    Returns (auc_true, null_aucs): the per-fold AUCs of the real model and a
    list (one entry per shuffle) of per-fold AUC lists for shuffled labels.

    NOTE(review): mutates `cross_val` in place (reassigns .y / .y_indices),
    which assumes an old-style sklearn cross-validation object; the `y`
    passed to roc_curve_cv is the shuffled copy. `random_state` is unused.
    Python-2 `print` statements throughout.
    '''
    null_aucs = []
    print y.shape
    print X.shape
    # Baseline: ROC/AUC of the classifier on the real labels.
    tpr_true, auc_true, fpr_true = roc_curve_cv(X, y, clf, cross_val)
    # shuffle y lots of times
    for i in range(0, num_shuffles):
        #Iterate through the shuffled y vals and repeat with appropriate params
        # Retain the auc vals for final plotting of distribution
        y_shuffle = shuffle(y)
        cross_val.y = y_shuffle
        cross_val.y_indices = y_shuffle
        # NOTE(review): this sums matches, not differences, despite the label.
        print 'Number of differences b/t original and shuffle: %s' % (y == cross_val.y).sum()
        # Get auc values for number of iterations
        tpr, auc, fpr = roc_curve_cv(X, y_shuffle, clf, cross_val, plot=False)
        null_aucs.append(auc)
    #plot the outcome
    if plot:
        # Flatten the per-shuffle AUC lists into one null distribution.
        flattened_aucs = [j for i in null_aucs for j in i]
        my_dict = {'true_auc': auc_true, 'null_auc': flattened_aucs}
        df_poop = pd.DataFrame.from_dict(my_dict, orient='index').T
        df_tidy = pd.melt(df_poop, value_vars=['true_auc', 'null_auc'],
                          value_name='auc', var_name='AUC_type')
        #print flattened_aucs
        sns.violinplot(x='AUC_type', y='auc',
                       inner='points', data=df_tidy)
        # Plot distribution of AUC vals
        plt.title("Distribution of aucs")
        #sns.plt.ylabel('count')
        plt.xlabel('AUC')
        #sns.plt.plot(auc_true, 0, color='red', markersize=10)
        plt.show()
    # Do a quick t-test to see if odds of randomly getting an AUC that good
    return auc_true, null_aucs
# Make a null model AUC curve & compare it to null-model
# Random forest magic!
rf_estimators = 1000
n_iter = 50
test_size = 0.3
random_state = 1
# Define Cross-validation and classifier
cross_val_rf = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size, random_state=random_state)
clf_rf = RandomForestClassifier(n_estimators=rf_estimators, random_state=random_state)
true_auc, all_aucs = make_null_model(X, y, clf_rf, cross_val_rf, num_shuffles=5)
# make dataframe from true and false aucs
flattened_aucs = [j for i in all_aucs for j in i]
my_dict = {'true_auc': true_auc, 'null_auc': flattened_aucs}
df_poop = pd.DataFrame.from_dict(my_dict, orient='index').T
df_tidy = pd.melt(df_poop, value_vars=['true_auc', 'null_auc'],
value_name='auc', var_name='AUC_type')
print df_tidy.head()
#print flattened_aucs
sns.violinplot(x='AUC_type', y='auc',
inner='points', data=df_tidy, bw=0.7)
plt.show()
from sklearn.decomposition import PCA
# Check PCA of things
def PCA_plot(X, y, n_components, plot_color, class_nums, class_names, title='PCA'):
    """Scatter-plot the first two PCA components of X, colored by class.

    :param X: samples-x-features matrix
    :param y: integer class labels aligned with rows of X
    :param n_components: number of PCA components to fit (only the first two
        are plotted)
    :param plot_color: one color per class, aligned with class_nums
    :param class_nums: integer label values to plot
    :param class_names: legend labels, aligned with class_nums

    NOTE(review): `title` is unused — the plot title is hard-coded to
    'PCA of Malaria data'. Python-2 `print` statements.
    """
    pca = PCA(n_components=n_components)
    X_pca = pca.fit(X).transform(X)
    print zip(plot_color, class_nums, class_names)
    for color, i, target_name in zip(plot_color, class_nums, class_names):
        # plot one class at a time, first plot all classes y == 0
        #print color
        #print y == i
        xvals = X_pca[y == i, 0]
        print xvals.shape
        yvals = X_pca[y == i, 1]
        plt.scatter(xvals, yvals, color=color, alpha=0.8, label=target_name)
    plt.legend(bbox_to_anchor=(1.01,1), loc='upper left', shadow=False)#, scatterpoints=1)
    plt.title('PCA of Malaria data')
    plt.show()
PCA_plot(X, y, 2, ['red', 'blue'], [0,1], ['malaria', 'non-malaria fever'])
# convert classes to numbers
le = preprocessing.LabelEncoder()
le.fit(class_map_df['class'])
y_three_class = le.transform(class_map_df['class'])
print class_map_df.head(10)
print y_three_class
print X.shape
print y_three_class.shape
y_labels = np.sort(class_map_df['class'].unique())
print y_labels
colors = ['green', 'red', 'blue']
print np.unique(y_three_class)
PCA_plot(X, y_three_class, 2, colors, np.unique(y_three_class), y_labels)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2> Import the dataframe and remove any features that are all zero </h2>
Step2: <h2> Get mappings between sample names, file names, and sample classes </h2>
Step3: <h2> Plot the distribution of classification accuracy across multiple cross-validation splits - Kinda Dumb</h2>
Step4: <h2> pqn normalize your features </h2>
Step5: <h2>Random Forest & adaBoost with PQN-normalized data</h2>
Step6: <h2> RF & adaBoost with PQN-normalized, log-transformed data </h2>
Step7: <h2> Great, you can classify things. But make null models and do a sanity check to make
Step8: <h2> This seems kinda wild </h2>
Step9: <h2> Let's see if PCA differentiates all three classes easily, it looks like it might... </h2>
|
10,271
|
<ASSISTANT_TASK:>
Python Code:
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
def download(url, file):
    """
    Download file from <url>, skipping the download if it already exists.

    :param url: URL to file
    :param file: Local file path
    """
    # NOTE: the docstring delimiters were missing in the original, which made
    # the bare description lines a syntax error; restored here.
    if not os.path.isfile(file):
        print('Downloading ' + file + '...')
        urlretrieve(url, file)
        print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
    """
    Uncompress features and labels from a zip file of letter images.

    :param file: The zip file to extract the data from
    :return: (features, labels) — features is an N x 784 float32 array of
        flattened images; labels is an N array of single letters taken from
        the first character of each file name.
    """
    # NOTE: the docstring delimiters were missing in the original, which made
    # the bare description lines a syntax error; restored here.
    features = []
    labels = []
    with ZipFile(file) as zipf:
        # Progress Bar
        filenames_pbar = tqdm(zipf.namelist(), unit='files')
        # Get features and labels from all files
        for filename in filenames_pbar:
            # Check if the file is a directory
            if not filename.endswith('/'):
                with zipf.open(filename) as image_file:
                    image = Image.open(image_file)
                    image.load()
                    # Load image data as 1 dimensional array
                    # We're using float32 to save on memory space
                    feature = np.array(image, dtype=np.float32).flatten()
                    # Get the letter from the filename. This is the letter of the image.
                    label = os.path.split(filename)[1][0]
                features.append(feature)
                labels.append(label)
    return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
    """
    Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]

    :param image_data: The image data to be normalized (values in [0, 255])
    :return: Normalized image data as a numpy array
    """
    # Min-Max scaling with a = 0.1, b = 0.9, x_min = 0, x_max = 255:
    #   x' = a + (x - x_min) * (b - a) / (x_max - x_min) = 0.1 + x * 0.8 / 255
    # Vectorized instead of the original map/lambda-over-elements, which
    # built a Python list one pixel at a time.
    image_data = np.asarray(image_data)
    return 0.1 + (image_data * 0.8) / 255
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# TODO: Set the features and labels tensors
features = tf.placeholder(tf.float32, [None, features_count])
labels = tf.placeholder(tf.float32, [None, labels_count])
# TODO: Set the weights and biases tensors
weights = tf.Variable(tf.truncated_normal((features_count, labels_count)))
biases = tf.Variable(tf.zeros(labels_count))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
epochs = 5
learning_rate = 0.05
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
Step5: <img src="image/Mean_Variance_Image.png" style="height
Step6: Checkpoint
Step7: Problem 2
Step8: <img src="image/Learn_Rate_Tune_Image.png" style="height
Step9: Test
|
10,272
|
<ASSISTANT_TASK:>
Python Code:
from owslib.csw import CatalogueServiceWeb
endpoint = 'http://www.ngdc.noaa.gov/geoportal/csw'
csw = CatalogueServiceWeb(endpoint, timeout=30)
import pandas as pd
ioos_ras = ['AOOS', # Alaska
'CaRA', # Caribbean
'CeNCOOS', # Central and Northern California
'GCOOS', # Gulf of Mexico
'GLOS', # Great Lakes
'MARACOOS', # Mid-Atlantic
'NANOOS', # Pacific Northwest
'NERACOOS', # Northeast Atlantic
'PacIOOS', # Pacific Islands
'SCCOOS', # Southern California
'SECOORA'] # Southeast Atlantic
url = 'https://raw.githubusercontent.com/ioos/registry/master/uuid.csv'
df = pd.read_csv(url, index_col=0, header=0, names=['UUID'])
df['UUID'] = df['UUID'].str.strip()
from owslib.fes import PropertyIsEqualTo
def query_ra(csw, uuid='B3EA8869-B726-4E39-898A-299E53ABBC98'):
    """Fetch all catalog records belonging to one RA site UUID.

    The UUID is wrapped in braces because that is how the geoportal stores
    ``sys.siteuuid`` values.  Results accumulate on ``csw.records``.

    Args:
        csw: An owslib ``CatalogueServiceWeb`` instance to query.
        uuid: Bare site UUID string (no surrounding braces).

    Returns:
        The same ``csw`` object, with results populated by ``getrecords2``.
    """
    constraint = PropertyIsEqualTo(
        propertyname='sys.siteuuid',
        literal='{{{0}}}'.format(uuid),
    )
    csw.getrecords2(constraints=[constraint], maxrecords=2000, esn='full')
    return csw
for ra in ioos_ras:
try:
uuid = df.ix[ra]['UUID'].strip('{').strip('}')
csw = query_ra(csw, uuid)
ret = csw.results['returned']
word = 'records' if ret > 1 else 'record'
print("{0:>8} has {1:>4} {2}".format(ra, ret, word))
csw.records.clear()
except KeyError:
pass
HTML(html)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will use the same list of all the Regional Associations as before,
Step2: The function below is similar to the one we used before.
Step3: Compare the results above with cell [6] from before. Note that we now get 192 records for PacIOOS and 74 for AOOS!
|
10,273
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
x = np.linspace(0,4*np.pi,10)
x
f = np.sin(x)
f
plt.plot(x, f, marker='o')
plt.xlabel('x')
plt.ylabel('f(x)');
from scipy.interpolate import interp1d
x = np.linspace(0,4*np.pi,10) # only use 10 points to emphasize this is an approx
f = np.sin(x)
sin_approx = interp1d(x, f, kind='cubic')
newx = np.linspace(0,4*np.pi,100)
newf = sin_approx(newx)
plt.plot(x, f, marker='o', linestyle='', label='original data')
plt.plot(newx, newf, marker='.', label='interpolated');
plt.legend();
plt.xlabel('x')
plt.ylabel('f(x)');
plt.plot(newx, np.abs(np.sin(newx)-sin_approx(newx)))
plt.xlabel('x')
plt.ylabel('Absolute error');
x = 4*np.pi*np.random.rand(15)
f = np.sin(x)
sin_approx = interp1d(x, f, kind='cubic')
# We have to be careful about not interpolating outside the range
newx = np.linspace(np.min(x), np.max(x),100)
newf = sin_approx(newx)
plt.plot(x, f, marker='o', linestyle='', label='original data')
plt.plot(newx, newf, marker='.', label='interpolated');
plt.legend();
plt.xlabel('x')
plt.ylabel('f(x)');
plt.plot(newx, np.abs(np.sin(newx)-sin_approx(newx)))
plt.xlabel('x')
plt.ylabel('Absolute error');
from scipy.interpolate import interp2d
def wave2d(x, y):
    """Separable standing wave: sin(2*pi*x) * sin(3*pi*y)."""
    kx, ky = 2.0 * np.pi, 3.0 * np.pi
    return np.sin(kx * x) * np.sin(ky * y)
x = np.linspace(0.0, 1.0, 10)
y = np.linspace(0.0, 1.0, 10)
X, Y = np.meshgrid(x, y)
Z = wave2d(X, Y)
Z
plt.pcolor(X, Y, Z)
plt.colorbar();
plt.scatter(X, Y);
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('x')
plt.ylabel('y');
wave2d_approx = interp2d(X, Y, Z, kind='cubic')
xnew = np.linspace(0.0, 1.0, 40)
ynew = np.linspace(0.0, 1.0, 40)
Xnew, Ynew = np.meshgrid(xnew, ynew) # We will use these in the scatter plot below
Fnew = wave2d_approx(xnew, ynew) # The interpolating function automatically creates the meshgrid!
Fnew.shape
plt.pcolor(xnew, ynew, Fnew);
plt.colorbar();
plt.scatter(X, Y, label='original points')
plt.scatter(Xnew, Ynew, marker='.', color='green', label='interpolated points')
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('x')
plt.ylabel('y');
plt.legend(bbox_to_anchor=(1.2, 1), loc=2, borderaxespad=0.);
from scipy.interpolate import griddata
x = np.random.rand(100)
y = np.random.rand(100)
f = wave2d(x, y)
plt.scatter(x, y);
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('x')
plt.ylabel('y');
xnew = np.linspace(x.min(), x.max(), 40)
ynew = np.linspace(y.min(), y.max(), 40)
Xnew, Ynew = np.meshgrid(xnew, ynew)
Xnew.shape, Ynew.shape
Fnew = griddata((x,y), f, (Xnew, Ynew), method='cubic', fill_value=0.0)
Fnew.shape
plt.pcolor(Xnew, Ynew, Fnew, label="points")
plt.colorbar()
plt.scatter(x, y, label='original points')
plt.scatter(Xnew, Ynew, marker='.', color='green', label='interpolated points')
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('x')
plt.ylabel('y');
plt.legend(bbox_to_anchor=(1.2, 1), loc=2, borderaxespad=0.);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Overview
Step2: This creates a new array of points that are the values of $\sin(x_i)$ at each point $x_i$
Step3: This plot shows that the points in this numerical array are an approximation to the actual function as they don't have the function's value at all possible points. In this case we know the actual function ($\sin(x)$). What if we only know the value of the function at a limited set of points, and don't know the analytical form of the function itself? This is common when the data points come from a set of measurements.
Step4: Let's create the numerical data we will use to build our interpolation.
Step5: To create our approximate function, we call interp1d as follows, with the numerical data. Options for the kind argument includes
Step6: The sin_approx variabl that interp1d returns is a callable object that can be used to compute the approximate function at other points. Compute the approximate function on a fine grid
Step7: Plot the original data points, along with the approximate interpolated values. It is quite amazing to see how the interpolation has done a good job of reconstructing the actual function with relatively few points.
Step8: Let's look at the absolute error between the actual function and the approximate interpolated function
Step9: 1d non-regular data
Step10: Notice how the absolute error is larger in the intervals where there are no points.
Step11: Here is the actual function we will use the generate our original dataset
Step12: Build 1d arrays to use as the structured grid
Step13: Build 2d arrays to use in computing the function on the grid points
Step14: Here is a scatter plot of the points overlayed with the value of the function at those points
Step15: You can see in this plot that the function is not smooth as we don't have its value on a fine grid.
Step16: Compute the interpolated function on a fine grid
Step17: Plot the original course grid of points, along with the interpolated function values on a fine grid
Step18: Notice how the interpolated values (green points) are now smooth and continuous. The amazing thing is that the interpolation algorithm doesn't know anything about the actual function. It creates this nice approximation using only the original course grid (blue points).
Step19: There is an important difference between griddata and the interp1d/interp2d
Step20: Notice how we pass these 1d arrays to our function and don't use meshgrid
Step21: It is clear that our grid is very unstructured
Step22: To use griddata we need to compute the final (strcutured) grid we want to compute the interpolated function on
|
10,274
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
def well2d(x, y, nx, ny, L=1.0):
    """2-D infinite-square-well wavefunction psi_{nx,ny}(x, y) on [0, L]."""
    norm = 2.0 / L
    mode_x = np.sin(nx * np.pi * x / L)
    mode_y = np.sin(ny * np.pi * y / L)
    return norm * mode_x * mode_y
psi = well2d(np.linspace(0,1,10), np.linspace(0,1,10), 1, 1)
assert len(psi)==10
assert psi.shape==(10,)
plt.contour(well2d(x, y, 3, 2, 0))
assert True # use this cell for grading the contour plot
# YOUR CODE HERE
raise NotImplementedError()
assert True # use this cell for grading the pcolor plot
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Contour plots of 2d wavefunctions
Step2: The contour, contourf, pcolor and pcolormesh functions of Matplotlib can be used for effective visualizations of 2d scalar fields. Use the Matplotlib documentation to learn how to use these functions along with the numpy.meshgrid function to visualize the above wavefunction
Step3: Next make a visualization using one of the pcolor functions
|
10,275
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
assert os.environ['COLAB_TPU_ADDR'], 'Make sure to select TPU from Edit > Notebook settings > Hardware accelerator'
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from absl import logging
logging.set_verbosity(logging.ERROR)
# Initialize TPU Strategy.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
y_train, y_test = y_train.astype(np.int32), y_test.astype(np.int32)
def show_img(img):
    """Render a single 2-D image array with matplotlib, grid lines off."""
    plt.figure()
    plt.imshow(img)
    plt.grid(False)
    plt.show()
img = 0
show_img(x_test[img].reshape(28, 28))
NUM_CLASSES = 10
# input image dimensions
IMG_ROWS, IMG_COLS = 28, 28
x_train = x_train.reshape(x_train.shape[0], IMG_ROWS, IMG_COLS, 1)
x_test = x_test.reshape(x_test.shape[0], IMG_ROWS, IMG_COLS, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train = x_train / 255.0
x_test = x_test / 255.0
x_train_corners = x_train[:, :14, :14, :]
x_test_corners = x_test[:, :14, :14, :]
show_img(x_test_corners[img].reshape(14, 14))
def get_model(input_shape):
    """Build a minimal classifier: flatten followed by one dense layer.

    Args:
        input_shape: Shape tuple of one input image (excluding batch dim).

    Returns:
        A Keras functional ``Model`` mapping images to NUM_CLASSES scores.
    """
    ip = tf.keras.layers.Input(shape=input_shape)
    x = tf.keras.layers.Flatten()(ip)
    # NOTE(review): 'sigmoid' paired with SparseCategoricalCrossentropy (see
    # the compile calls below) is unusual; 'softmax' is the conventional
    # choice for multi-class output -- confirm this is intentional.
    x = tf.keras.layers.Dense(NUM_CLASSES, activation='sigmoid')(x)
    model = tf.keras.models.Model(ip, x)
    return model
with strategy.scope():
model0 = get_model(x_train_corners[0].shape)
model0.compile(
optimizer=tf.optimizers.SGD(learning_rate=0.05),
loss=tf.losses.SparseCategoricalCrossentropy(),
metrics=[tf.metrics.SparseCategoricalAccuracy()])
model0.fit(x_train_corners, y_train, epochs=3, batch_size=128)
model0.evaluate(x_test_corners, y_test)
def get_autoencoder_and_encoder(input_shape):
    """Build a convolutional autoencoder plus a view of its encoder half.

    Args:
        input_shape: Shape of one input image, e.g. (28, 28, 1).

    Returns:
        (autoencoder, encoder): the full reconstruction model and a second
        model sharing the same layers that outputs the bottleneck embedding.
    """
    ip = tf.keras.layers.Input(shape=input_shape)
    # Encoder: two conv + 2x2 pool stages; for 28x28 inputs this yields a
    # single-channel 7x7 bottleneck (embeddings are reshaped to 7x7 later).
    x = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same')(ip)
    x = tf.keras.layers.MaxPooling2D((2, 2), padding='same')(x)
    x = tf.keras.layers.Conv2D(1, (3, 3), activation='relu', padding='same')(x)
    encoded = tf.keras.layers.MaxPooling2D((2, 2), padding='same')(x)
    # Decoder: stride-2 transposed convolutions upsample back to the input
    # resolution; final sigmoid matches the [0, 1]-scaled pixel targets.
    x = tf.keras.layers.Conv2DTranspose(1, (3, 3), activation='relu', strides=2, padding='same')(encoded)
    x = tf.keras.layers.Conv2DTranspose(32, (3, 3), activation='relu', strides=2, padding='same')(x)
    decoded = tf.keras.layers.Conv2DTranspose(1, (3, 3), activation='sigmoid', padding='same')(x)
    autoencoder = tf.keras.models.Model(ip, outputs=decoded)
    # The encoder reuses the same layer objects, so training the autoencoder
    # also trains the encoder's weights.
    encoder = tf.keras.models.Model(ip, encoded)
    return autoencoder, encoder
tf.keras.backend.clear_session()
with strategy.scope():
autoencoder, encoder = get_autoencoder_and_encoder(x_train[0].shape)
autoencoder.compile(
optimizer=tf.optimizers.Adam(),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=[tf.metrics.BinaryAccuracy()])
autoencoder.fit(
x_train,
x_train,
batch_size=128,
epochs=3,
steps_per_epoch=468,
validation_data=(x_test, x_test))
x_train_embeddings = encoder.predict(x_train)
x_test_embeddings = encoder.predict(x_test)
x_test_hat = autoencoder.predict(x_test[:8])
show_img(x_test_hat[img].reshape(28, 28))
show_img(x_test[0].reshape(28, 28))
show_img(x_test_embeddings[0].reshape(7, 7))
x_train_augmented = np.concatenate([x_train_corners.reshape(60000, 14*14, 1), x_train_embeddings.reshape(60000, 7*7, 1)], axis=1)
x_test_augmented = np.concatenate([x_test_corners.reshape(10000, 14*14, 1), x_test_embeddings.reshape(10000, 7*7, 1)], axis=1)
with strategy.scope():
model1 = get_model(x_train_augmented[0].shape)
model1.compile(
optimizer=tf.optimizers.SGD(learning_rate=0.06),
loss=tf.losses.SparseCategoricalCrossentropy(),
metrics=['accuracy'])
model1.fit(x_train_augmented, y_train, epochs=3, batch_size=128)
model1.evaluate(x_test_augmented, y_test)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Train embeddings on TPU using Autoencoder
Step2: Get data
Step3: Function to visualize our images and pick the first image from the test set
Step4: The first image from the test set is the number 7
Step5: MNIST setup
Step6: Original model
Step7: The first image corner from the test set
Step8: Create a model with one fully-connected layer
Step9: Train and evaluate the fully-connected one layer model on CPU
Step10: Create an autoencoder and make sure to get back an encoder as well
Step11: Train the autoencoder on TPU
Step12: Produce image embeddings
Step13: Produce image reconstructions
Step14: Reconstructed number 7
Step15: Check the original image
Step16: Examine the embedding for the number 7
Step17: Augment the corners dataset
Step18: Retrain the original model
|
10,276
|
<ASSISTANT_TASK:>
Python Code:
tr = np.array(model.monitor.channels['valid_y_y_1_nll'].time_record) / 3600.
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(111)
ax1.plot(model.monitor.channels['valid_y_y_1_nll'].val_record)
ax1.plot(model.monitor.channels['train_y_y_1_nll'].val_record)
ax1.plot(model_no_mom.monitor.channels['valid_y_y_1_nll'].val_record)
ax1.plot(model_no_mom.monitor.channels['train_y_y_1_nll'].val_record)
ax1.set_xlabel('Epochs')
ax1.legend(['Valid', 'Train', 'Valid (no mom.)', 'Train (no mom.)'])
ax1.set_ylabel('NLL')
ax1.set_ylim(0., 5.)
ax1.grid(True)
ax2 = ax1.twiny()
ax2.set_xticks(np.arange(0,tr.shape[0],20))
ax2.set_xticklabels(['{0:.2f}'.format(t) for t in tr[::20]])
ax2.set_xlabel('Hours')
plt.plot(model.monitor.channels['train_term_1_l1_penalty'].val_record)
plt.plot(model.monitor.channels['train_term_2_weight_decay'].val_record)
pv = get_weights_report(model=model)
img = pv.get_img()
img = img.resize((4*img.size[0], 4*img.size[1]))
img_data = io.BytesIO()
img.save(img_data, format='png')
display(Image(data=img_data.getvalue(), format='png'))
plt.plot(model.monitor.channels['learning_rate'].val_record)
h1_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h1_W_kernel_norm_mean'].val_record])
h1_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h1_kernel_norms_mean'].val_record])
plt.plot(h1_W_norms / h1_W_up_norms)
#plt.ylim(0,1000)
plt.show()
plt.plot(model.monitor.channels['valid_h1_kernel_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h1_kernel_norms_max'].val_record)
h2_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h2_W_kernel_norm_mean'].val_record])
h2_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h2_kernel_norms_mean'].val_record])
plt.plot(h2_W_norms / h2_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_h2_kernel_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h2_kernel_norms_max'].val_record)
h3_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h3_W_kernel_norm_mean'].val_record])
h3_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h3_kernel_norms_mean'].val_record])
plt.plot(h3_W_norms / h3_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_h3_kernel_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h3_kernel_norms_max'].val_record)
h4_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h4_W_kernel_norm_mean'].val_record])
h4_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h4_kernel_norms_mean'].val_record])
plt.plot(h4_W_norms / h4_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_h4_kernel_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h4_kernel_norms_max'].val_record)
h5_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h5_W_kernel_norm_mean'].val_record])
h5_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h5_kernel_norms_mean'].val_record])
plt.plot(h5_W_norms / h5_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_h5_kernel_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h5_kernel_norms_max'].val_record)
h6_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h6_W_col_norm_mean'].val_record])
h6_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h6_col_norms_mean'].val_record])
plt.plot(h6_W_norms / h6_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_h6_col_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h6_col_norms_max'].val_record)
y_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_softmax_W_col_norm_mean'].val_record])
y_W_norms = np.array([float(v) for v in model.monitor.channels['valid_y_y_1_col_norms_mean'].val_record])
plt.plot(y_W_norms / y_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_y_y_1_col_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_y_y_1_col_norms_max'].val_record)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot ratio of update norms to parameter norms across epochs for different layers
|
10,277
|
<ASSISTANT_TASK:>
Python Code:
def soma(x, y):
    """Return the sum of *x* and *y*."""
    return x + y
r = soma(50, 20)
print (r)
def soma(x, y, squared=False):
    """Return x + y; when *squared* is True, return the square of the sum."""
    total = x + y
    return total ** 2 if squared else total
print ('soma(2, 3):', soma(2, 3))
print ('soma(2, 3, False):', soma(2, 3, False))
print ('soma(2, 3, True):', soma(2, 3, True))
print ('soma(2, 3, squared= True):', soma(2, 3, squared= True))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Para se realizar a chamada da função soma, basta utilizá-la pelo seu nome passando os parâmetros como argumentos da função. Veja o exemplo a seguir
Step2: Parâmetros da função
Step3: Observe que os parâmetros, x e y são posicionais e serão os 2 primeiros argumentos da chamada da função. O terceiro parâmetro é por palavra chave e portanto opcional, posso usá-lo tanto na forma posicional, como na forma explícita com a palavra chave. A grande vantagem neste esquema é que posso ter um grande número de parâmetros com palavra chave e na hora de usar a função deixar explicitamente só os parâmetros desejados.
|
10,278
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
df = pd.read_csv('tensile_test_data.csv', )
df.head()
df = pd.read_csv('tensile_test_data.csv', header=None)
df.head()
df.plot(x=2, y=1)
df = pd.read_excel('weather_data.xlsx')
df.head()
df = pd.read_excel('weather_data.xlsx', header=None)
df.head()
df2 = df.transpose()
df2.head()
df2.replace(-99999, np.nan, inplace=True)
df2.tail()
#df2.plot()
#df.sum()
df2.sum().plot()
df = pd.read_json('json_data.json')
df.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This gives us some strange column names. '237.7605198' is one of the values in the data set, not the column name. We need to specify header=None as an optional argument, to ensure that pandas doesn't use the first row of our data set as column headers.
Step2: Now for a quick plot. The x-values will be from the 3rd column (which has a header '2') and our y-values will come from the second column (which has a header '1'). Remember that Python starts counting at zero, not 1.
Step3: Next let's read in an Excel file. Excel files have an .xlsx or .xls file extension. We use the pandas function pd.read_excel().
Step4: Again, we have the problem with the headers. Pandas is using the first row of our excel sheet as the column headers in our dataframe. Including header=None will add sequential column numbers and build the dataframe's first row from the first row of the excel sheet.
|
10,279
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.4,<2.5"
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
logger = phoebe.logger()
b = phoebe.default_binary()
b.add_dataset('lc', compute_phases=phoebe.linspace(0,1,101))
b.run_compute(irrad_method='none')
times = b.get_value('times', context='model')
fluxes = b.get_value('fluxes', context='model') + np.random.normal(size=times.shape) * 0.01
sigmas = np.ones_like(times) * 0.05
b = phoebe.default_binary()
b.set_value('q', 0.8)
b.set_value('ecc', 0.1)
b.set_value('incl@orbit', 80)
b.set_value('irrad_method', 'none')
b.add_dataset('orb', compute_times=np.linspace(0,4,1000), dataset='orb01', component=['primary', 'secondary'])
b.add_dataset('lc', times=times, fluxes=fluxes, sigmas=sigmas, dataset='lc01')
b.run_compute(irrad_method='none')
afig, mplfig = b.plot(show=True)
afig, mplfig = b.filter(dataset='lc01').plot(show=True)
afig, mplfig = b.plot(dataset='lc01', show=True)
afig, mplfig = b.filter(dataset='orb01').plot(x='times', y='vus', show=True)
b.filter(context='model', dataset='orb01').qualifiers
afig, mplfig = b.plot(dataset='lc01', x='phases', z=0, show=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This first line is only necessary for ipython noteboooks - it allows the plots to be shown on this page instead of in interactive mode. Depending on your version of Jupyter, Python, and matplotlib - you may or may not need this line in order to see plots in the notebook.
Step2: First we're going to create some fake observations so that we can show how to plot observational data. In real life, we would use something like np.loadtxt to get arrays from a data file instead.
Step3: Now we'll create a new Bundle and attach an orbit dataset (without observations) and a light curve dataset (with our "fake" observations - see Datasets for more details)
Step4: And run a forward model. See Computing Observables for more details.
Step5: Showing and Saving
Step6: Any call to plot returns two objects - the autofig and matplotlib figure instances. Generally we won't need to do anything with these, but having them returned could come in handy if you want to manually edit either before drawing/saving the image.
Step7: Selecting Arrays
Step8: To see the list of available qualifiers that could be passed for x or y, call the qualifiers (or twigs) property on the ParameterSet.
Step9: For more information on each of the available arrays, see the relevant tutorial on that dataset method
|
10,280
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
# Required coverage level for analysis. This is in units of number of apatamer
# particles (beads). This is used to minimize potential contamination.
# For example, a tolerated bead fraction of 0.2 means that if, based on read
# depth and number of beads, there are 100 reads expected per bead, then
# sequences with fewer than 20 reads would be excluded from analysis.
TOLERATED_BEAD_FRAC = 0.2
# Ratio cutoff between positive and negative pools to count as being real.
# The ratio is calculated normalized by read depth, so if the ratio is 0.5,
# then positive sequences are expected to have equal read depth (or more) in
# the positive pool as the negative pool. So, as a toy example, if the
# positive pool had 100 reads total and the negative pool had 200 reads total,
# then a sequence with 5 reads in the positive pool and 10 reads in the
# negative pool would have a ratio of 0.5.
POS_NEG_RATIO_CUTOFF = 0.5
# Minimum required reads (when 0 it uses only the above filters)
MIN_READ_THRESH = 0
#@title Original PD Data Parameters
# Since these are small I'm going to embed in the colab.
apt_screened_list = [ 2.4*10**6, 2.4*10**6, 1.24*10**6]
apt_collected_list = [3.5 * 10**4, 8.5 * 10**4, 8 * 10**4]
seq_input = [10**5] * 3
conditions = ['round2_high_no_serum_positive',
'round2_medium_no_serum_positive',
'round2_low_no_serum_positive']
flags = ['round2_high_no_serum_flag', 'round2_medium_no_serum_flag',
'round2_low_no_serum_flag']
stringency = ['High', 'Medium', 'Low']
pd_param_df = pd.DataFrame.from_dict({'apt_screened': apt_screened_list,
'apt_collected': apt_collected_list,
'seq_input': seq_input,
'condition': conditions,
'condition_flag': flags,
'stringency': stringency})
pd_param_df
#@title MLPD Data Parameters
apt_screened_list = [ 3283890.016, 6628573.952, 5801469.696, 3508412.512]
apt_collected_list = [12204, 50353, 153845, 201255]
seq_input = [200000] * 4
conditions = ['round1_very_positive',
'round1_high_positive',
'round1_medium_positive',
'round1_low_positive']
flags = ['round1_very_flag', 'round1_high_flag', 'round1_medium_flag',
'round1_low_flag']
stringency = ['Very High', 'High', 'Medium', 'Low']
mlpd_param_df = pd.DataFrame.from_dict({'apt_screened': apt_screened_list,
'apt_collected': apt_collected_list,
'seq_input': seq_input,
'condition': conditions,
'condition_flag': flags,
'stringency': stringency})
mlpd_param_df
# PD and MLPD sequencing counts across experiments
# Upload pd_clustered_input_data_manuscript.csv and mlpd_input_data_manuscript.csv
from google.colab import files
uploaded = files.upload()
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
# Load PD Data
with open('pd_clustered_input_data_manuscript.csv') as f:
pd_input_df = pd.read_csv(f)
# Load MLPD data
with open('mlpd_input_data_manuscript.csv') as f:
mlpd_input_df = pd.read_csv(f)
def generate_cutoffs_via_PD_stats(df, col, apt_screened, apt_collected,
                                  seq_input, tolerated_bead_frac,
                                  min_read_thresh):
    """Use the experimental parameters to determine sequences passing thresholds.

    Args:
        df: Pandas dataframe with experiment results. Must have a column named
            after the ``col`` parameter containing the read count, and a
            column 'sequence'.
        col: String name of the column holding the read count.
        apt_screened: Integer number of aptamers screened in the experiment.
        apt_collected: Integer number of aptamers collected.
        seq_input: Integer number of unique sequences in the library used to
            construct the aptamer particles.
        tolerated_bead_frac: Float minimum sequencing depth required to keep a
            sequence, in units of fractions of a bead's expected read depth.
        min_read_thresh: Integer minimum raw read count for a sequence to
            survive filtering.

    Returns:
        Pandas series of the sequences from the dataframe that pass filter.
    """
    # Average number of beads expected per library sequence.
    expected_bead_coverage = apt_screened / seq_input
    # Required coverage, scaled down to the tolerated fraction of one bead.
    tolerated_bead_coverage = expected_bead_coverage * tolerated_bead_frac
    # Minimum fraction of the collected pool one sequence must account for.
    bead_full_min_sequence_coverage = (1. / apt_collected) * tolerated_bead_coverage
    col_sum = df[col].sum()
    # Keep sequences clearing both the pool-fraction and raw-count filters.
    seqs = df[((df[col] / col_sum) > bead_full_min_sequence_coverage) &
              (df[col] > min_read_thresh)].sequence
    return seqs
def generate_pos_neg_normalized_ratio(df, col_prefix):
    """Adds fraction columns to the dataframe with the calculated pos/neg ratio.

    Args:
        df: Pandas dataframe, expected to have columns [col_prefix]_positive
            and [col_prefix]_negative containing read counts for the positive
            and negative selection conditions, respectively.
        col_prefix: String prefix of the columns used to calculate the ratio,
            for example 'round1_very'.

    Returns:
        The original dataframe with three new columns:
            [col_prefix]_positive_frac: fraction of the total positive pool
                that is this sequence.
            [col_prefix]_negative_frac: fraction of the total negative pool
                that is this sequence.
            [col_prefix]_pos_neg_ratio: read-depth-normalized fraction of the
                sequence that ended in the positive pool.
    """
    col_pos = col_prefix + '_positive'
    col_neg = col_prefix + '_negative'
    df[col_pos + '_frac'] = df[col_pos] / df[col_pos].sum()
    df[col_neg + '_frac'] = df[col_neg] / df[col_neg].sum()
    # Normalized ratio in [0, 1]; 0.5 means equal read depth in both pools.
    df[col_prefix + '_pos_neg_ratio'] = df[col_pos + '_frac'] / (
        df[col_pos + '_frac'] + df[col_neg + '_frac'])
    return df
def build_seq_sets_from_df(input_param_df, input_df, tolerated_bead_frac,
                           pos_neg_ratio, min_read_thresh):
    """Sets flags for sequences based on whether they clear stringencies.

    For each stringency level (row) of ``input_param_df``, marks in
    ``input_df`` (via the row's ``condition_flag`` column) every sequence
    that passes both the bead-fraction filter and the pos/neg ratio filter
    at that stringency.

    Args:
        input_param_df: Pandas dataframe with experimental parameters; one
            row per stringency level, with columns 'apt_screened',
            'apt_collected', 'seq_input', 'condition', and 'condition_flag'.
        input_df: Pandas dataframe with per-sequence counts, expected to have
            a [col_prefix]_pos_neg_ratio column for each stringency level.
        tolerated_bead_frac: Float minimum sequence depth, in units of
            expected beads, for a sequence to be used in analysis.
        pos_neg_ratio: Float threshold on the pos_neg_ratio column for a
            sequence to be used in the analysis.
        min_read_thresh: Integer minimum raw read count for a sequence to be
            used in the analysis.

    Returns:
        Nothing; ``input_df`` is modified in place.
    """
    for _, row in input_param_df.iterrows():
        # Parameters needed to compute the per-sequence bead fraction cutoff.
        apt_screened = row['apt_screened']
        apt_collected = row['apt_collected']
        seq_input = row['seq_input']
        condition = row['condition']
        flag = row['condition_flag']
        # Sequences above the tolerated bead fraction in the positive pool.
        tolerated_bead_frac_seqs = generate_cutoffs_via_PD_stats(
            input_df, condition, apt_screened, apt_collected, seq_input,
            tolerated_bead_frac, min_read_thresh)
        # Intersect with sequences above the normalized pos/neg ratio cutoff.
        condition_pre = condition.split('_positive')[0]
        ratio_col = '%s_pos_neg_ratio' % (condition_pre)
        pos_frac_seqs = input_df[input_df[ratio_col] > pos_neg_ratio].sequence
        seqs = set(tolerated_bead_frac_seqs) & set(pos_frac_seqs)
        input_df[flag] = input_df.sequence.isin(seqs)
#@title Add positive_frac / (positive_frac + negative_frac) col to df
for col_prefix in ['round1_very', 'round1_high', 'round1_medium', 'round1_low']:
mlpd_input_df = generate_pos_neg_normalized_ratio(mlpd_input_df, col_prefix)
for col_prefix in ['round2_high_no_serum', 'round2_medium_no_serum', 'round2_low_no_serum']:
pd_input_df = generate_pos_neg_normalized_ratio(pd_input_df, col_prefix)
#@title Measure consistency of particle display data when increasing stringency thresholds within each experimental set (i.e PD and MLPD)
build_seq_sets_from_df(pd_param_df, pd_input_df, TOLERATED_BEAD_FRAC,
POS_NEG_RATIO_CUTOFF, MIN_READ_THRESH)
build_seq_sets_from_df(mlpd_param_df, mlpd_input_df, TOLERATED_BEAD_FRAC,
POS_NEG_RATIO_CUTOFF, MIN_READ_THRESH)
#@title Figure 2B Raw Data
pd_input_df.groupby('round2_low_no_serum_flag round2_medium_no_serum_flag round2_high_no_serum_flag'.split()).count()[['sequence']]
#@title Figure 2C Raw Data
# To build venn (green), sum preceding True flags to get consistent sets
# 512 nM = 5426+3 = 5429
# 512 & 128 nM = 2360+15 = 2375
# 512 & 128 & 32nM (including 8 nM) = 276+84 = 360
# To build venn (grey) Inconsistent flags are summed (ignoring 8nM)
# 128 nM only = 185 + 1 = 186
# 128 nM & 32 nM = 12+1 = 13
# 32 nM only = 2
# 32 nM and 512 nM only = 22+1 = 23
#
# To build pie, look at all round1_very_flags = True
# Green = 84
# Grey = 15+1+3+1+1 = 21
mlpd_input_df.groupby('round1_low_flag round1_medium_flag round1_high_flag round1_very_flag'.split()).count()[['sequence']]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parameters used in Manuscript
Step2: Load in data
Step3: Load CSVs
Step7: Helper functions
Step8: Data Analysis
Step9: Generate Figure Data
|
10,281
|
<ASSISTANT_TASK:>
Python Code:
# Conversion factor: metres/second -> millimetres/hour.
mps_to_mmph = 1000 * 3600
import numpy as np
n_steps = 10 # can get from cfg file
# Precipitation rates from 5 to 20 mm/hr, one value per model step.
precip_rates = np.linspace(5, 20, num=n_steps, endpoint=False)
precip_rates
np.savetxt('./input/precip_rates.txt', precip_rates, fmt='%6.2f')
cat input/precip_rates.txt
# NOTE: the `cat` / `%cd` lines are IPython notebook magics, and the
# statement-form `print`s below are Python 2 syntax.
from topoflow.components.met_base import met_component
m = met_component()
m.initialize('./input/meteorology-1.cfg')
m.h_snow = 0.0 # Needed for update
precip = m.get_value('atmosphere_water__precipitation_leq-volume_flux') # `P` internally
print type(precip)
print precip.size
precip * mps_to_mmph
m.update()
print '\nCurrent time: {} s'.format(m.get_current_time())
print precip * mps_to_mmph # note that this is a reference, so it'll take the current value of `P`
# Step the component to the end, recording time and flux (mm/hr) each step.
time = [m.get_current_time().copy()]
flux = [precip.copy() * mps_to_mmph]
while m.get_current_time() < m.get_end_time():
    m.update()
    time.append(m.get_current_time().copy())
    flux.append(m.get_value('atmosphere_water__precipitation_leq-volume_flux').copy() * mps_to_mmph)
time
flux
# Same experiment through the CMT (Babel) wrapper of the component.
from cmt.components import Meteorology
met = Meteorology()
%cd input
met.initialize('meteorology-1.cfg')
bprecip = met.get_value('atmosphere_water__precipitation_leq-volume_flux')
print type(bprecip)
print bprecip.size
print bprecip.shape
bprecip * mps_to_mmph
# Here the returned value is a grid, so track its max per step instead.
time = [met.get_current_time()]
flux = [bprecip.max() * mps_to_mmph]
count = 1
while met.get_current_time() < met.get_end_time():
    met.update(met.get_time_step()*count)
    time.append(met.get_current_time())
    flux.append(met.get_value('atmosphere_water__precipitation_leq-volume_flux').max() * mps_to_mmph)
    count += 1
time
flux
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Programmatically create a file holding the precipitation rate time series. This will mimic what I'll need to do in WMT, where I'll have access to the model time step and run duration. Start by defining the precipitation rate values
Step2: Next, write the values to a file to the input directory, where it's expected by the cfg file
Step3: Check the file
Step4: BMI component
Step5: Initialize the model. A value of snow depth h_snow is needed for the model to update.
Step6: Unlike when P is a scalar, the initial model precipitation volume flux is the first value from precip_rates.txt
Step7: Advance the model by one time step
Step8: Unlike the scalar case, there's an output volume flux of precipitation
Step9: Advance the model to the end, saving the model time and output P values (converted back to mm/hr for convenience) at each step
Step10: Check the time and flux values
Step11: Result
Step12: Initialize the model.
Step13: The initial model precipitation volume flux is the first value from precip_rates.txt
Step14: Advance the model to the end, saving the model time and output P values (converted back to mm/hr for convenience) at each step
Step15: Check the time and flux values (noting that I've included the time = 0.0 value here)
|
10,282
|
<ASSISTANT_TASK:>
Python Code:
# print all urls
# YAML version: parse the config and list every handler's url.
import yaml
import io
val = yaml.safe_load(io.open("example_config.yaml", "rt"))
print([entry["url"] for entry in val["handlers"]])
# print all urls
# AXON version of the same config (dict-style access).
import axon
val = axon.load("example_config1.axon")
print([entry["url"] for entry in val["handlers"]])
# print all urls
# Alternate AXON form: attribute-style access on the loaded objects.
vals = axon.load("example_config2.axon")
print([entry.url for entry in vals[0].handlers])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In AXON it will be formatted as
Step2: With AXON it can be also presented in the following form
|
10,283
|
<ASSISTANT_TASK:>
Python Code:
class Point:
    """Represents a point in a 2D Euclidean plane."""
    # NOTE: the docstring delimiters were stripped in the source dump,
    # leaving bare text that is a syntax error; restored here.

    def __init__(self, x, y):
        self.x = x
        self.y = y

    @property
    def tuplify(self):
        """Return the point as an (x, y) tuple."""
        return self.x, self.y

    def __lt__(self, other):
        # Lexicographic order: primarily by x, then by y.
        return self.tuplify < other.tuplify

    def __repr__(self):
        return str((self.x, self.y))
def orientation(p1, p2, p3):
    """Orientation test for the triangle spanned by points p1, p2 and p3.

    Returns a positive number if the triangle is positively (counter-clockwise)
    oriented, a negative number if it is negatively (clockwise) oriented, and
    0 if the triangle is degenerate (the points are collinear).
    """
    # 2D cross product of the edge vectors p1->p2 and p1->p3.
    a = (p2.x - p1.x, p2.y - p1.y)
    b = (p3.x - p1.x, p3.y - p1.y)
    return a[0]*b[1] - b[0]*a[1]
# Sanity checks for orientation() on a simple right triangle.
a = Point(0, 0)
b = Point(1, 0)
c = Point(0, 1)
d = Point(2, 0)
# Counter-clockwise triple -> positive.
assert orientation(a, b, c) > 0
# Collinear points -> degenerate (zero).
assert orientation(a, b, d) == 0
# Clockwise triple -> negative.
assert orientation(a, c, b) < 0
class Node:
    """Node in a circular doubly linked list."""
    # NOTE: the stripped docstring delimiters (syntax error in the dump)
    # are restored here.

    def __init__(self, data):
        self.data = data
        # A fresh node is its own predecessor and successor (1-element cycle).
        self.p, self.n = self, self

    def link_after(self, new_node):
        """Insert new_node into the linked list immediately after self."""
        new_node.n, new_node.p = self.n, self
        self.n.p, self.n = new_node, new_node

    def __repr__(self):
        # Walk the cycle once, starting from self.
        ret = "{0}".format(self.data)
        c = self
        while c.n != self:
            c = c.n
            ret += " <-> {0}".format(c.data)
        return ret
from random import random
def generify(points, epsilon=0.0001):
    """Jiggle the point coordinates slightly so they are in general position.

    Each point gets a random x-offset in (-epsilon, epsilon) and a
    second-order-small y-offset, breaking exact ties and collinearities.
    Yields new, perturbed Point instances (this is a generator).
    """
    # NOTE: restored the docstring delimiters stripped in the source dump.
    for point in points:
        e = 2*(random()-0.5)*epsilon
        d = 2*(random()-0.5)*epsilon*e
        yield Point(point.x + e, point.y + d)
def triangulation(points):
    '''
    Construct a line-sweep triangulation of the given (x, y) pairs.

    Returns a list of triangles, each a list of three Point objects.
    '''
    assert len(points) >= 3, 'Triangulation requires at least 3 points'
    # Perturb into general position, then sweep left-to-right (sorted order).
    points = sorted(generify((Point(x, y) for (x, y) in points)))
    nodes = [Node(point) for point in points]
    triangles = []
    # Seed with the first three points; ensure counter-clockwise orientation
    # of the initial hull by swapping the first two nodes if needed.
    triangles.append(points[:3])
    if orientation(*triangles[0]) < 0:
        nodes[0], nodes[1] = nodes[1], nodes[0]
    nodes[0].link_after(nodes[1])
    nodes[1].link_after(nodes[2])
    # Add the remaining points one at a time, triangulating as we go.
    # (Debug print statements from the original removed.)
    for i in range(3, len(nodes)):
        add_node(nodes[i-1], nodes[i], triangles)
    return triangles
def add_node(start, new, triangles):
    '''
    Process one new node (containing a point): record every triangle the
    new point forms with hull edges it can "see", then splice the node
    into the linked hull chain.
    '''
    # Walk forward while the edge (current, current.n) is visible from new.
    # (Debug print statements from the original removed.)
    current = start
    while orientation(new.data, current.n.data, current.data) > 0:
        triangles.append([new.data, current.n.data, current.data])
        current = current.n
    last, current = current, start
    # Walk backward while the edge (current.p, current) is visible from new.
    while orientation(new.data, current.data, current.p.data) > 0:
        triangles.append([new.data, current.data, current.p.data])
        current = current.p
    # Splice the new node into the hull after `current`.
    # NOTE(review): link_after immediately overwrites new.n with current.n,
    # so this `new.n = last` assignment appears dead — confirm intent.
    new.n = last
    current.link_after(new)
from matplotlib import pyplot
%matplotlib notebook
points = [(1,3),(6,2),(0.5,3),(0,4),(8,1),(8,0)]
xs, ys = map(list, zip(*points))
pyplot.axis([min(xs)-1,max(xs)+1,min(xs)-1,max(ys)+1])
pyplot.plot( xs, ys, 'ro')
triangles = triangulation(points)
colors = "bgrcmykwbgrcmykwbgrcmykwbgrcmykw"
for i, (p1, p2, p3) in enumerate(triangles):
pyplot.plot([p1.x, p2.x], [p1.y, p2.y], c=colors[i], alpha=0.3)
pyplot.plot([p1.x, p3.x], [p1.y, p3.y], c=colors[i], alpha=0.3)
pyplot.plot([p2.x, p3.x], [p2.y, p3.y], c=colors[i], alpha=0.3)
from matplotlib import pyplot
from random import randint
%matplotlib notebook
# Triangulate a second point set (random generation left commented out).
#points = [(randint(1, 100), randint(1, 100)) for i in range(5)]
points = [(0,0),(3,9),(5,-1),(9,4), (7, -5)]
xs, ys = map(list, zip(*points))
pyplot.axis([min(xs)-1,max(xs)+1,min(ys)-1,max(ys)+1])
pyplot.plot( xs, ys, 'ro')
triangles = triangulation(points)
# Draw the three edges of every triangle (default color cycle).
for i, (p1, p2, p3) in enumerate(triangles):
    pyplot.plot([p1.x, p2.x], [p1.y, p2.y])
    pyplot.plot([p1.x, p3.x], [p1.y, p3.y])
    pyplot.plot([p2.x, p3.x], [p2.y, p3.y])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Triangulation with line sweep
Step5: Test the orientation method on a simple test case.
Step6: Now we can start with our main method.
Step7: Test the given method and plot the results.
|
10,284
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Notebook cell: install TF 2.0 RC (the `!` line is an IPython shell magic).
!pip install tensorflow-gpu==2.0.0-rc1
import pandas as pd
import tensorflow as tf
# Download the heart-disease CSV and load it into a DataFrame.
csv_file = tf.keras.utils.get_file('heart.csv', 'https://storage.googleapis.com/applied-dl/heart.csv')
df = pd.read_csv(csv_file)
df.head()
df.dtypes
# Encode the categorical 'thal' column as integer codes.
df['thal'] = pd.Categorical(df['thal'])
df['thal'] = df.thal.cat.codes
df.head()
# Split off the label column and wrap features/labels as a tf.data.Dataset.
target = df.pop('target')
dataset = tf.data.Dataset.from_tensor_slices((df.values, target.values))
for feat, targ in dataset.take(5):
    print ('Features: {}, Target: {}'.format(feat, targ))
tf.constant(df['thal'])
# Shuffle the full dataset and use a batch size of 1.
train_dataset = dataset.shuffle(len(df)).batch(1)
def get_compiled_model():
    """Build and compile a small dense binary classifier.

    Two ReLU hidden layers of width 10 feed a single sigmoid output unit;
    compiled with Adam, binary cross-entropy loss and an accuracy metric.
    """
    layer_stack = [
        tf.keras.layers.Dense(10, activation='relu'),
        tf.keras.layers.Dense(10, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid'),
    ]
    classifier = tf.keras.Sequential(layer_stack)
    classifier.compile(optimizer='adam',
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    return classifier
# Train the sequential model on the batched dataset.
model = get_compiled_model()
model.fit(train_dataset, epochs=15)
# Alternative: a functional model taking a dict of per-column inputs.
inputs = {key: tf.keras.layers.Input(shape=(), name=key) for key in df.keys()}
x = tf.stack(list(inputs.values()), axis=-1)
x = tf.keras.layers.Dense(10, activation='relu')(x)
output = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model_func = tf.keras.Model(inputs=inputs, outputs=output)
model_func.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])
# Preserve the DataFrame's column structure by slicing a dict of lists.
dict_slices = tf.data.Dataset.from_tensor_slices((df.to_dict('list'), target.values)).batch(16)
for dict_slice in dict_slices.take(1):
    print (dict_slice)
model_func.fit(dict_slices, epochs=15)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 使用 tf.data 加载 pandas dataframes
Step2: 下载包含心脏数据集的 csv 文件。
Step3: 使用 pandas 读取 csv 文件。
Step4: 将 thal 列(数据帧(dataframe)中的 object )转换为离散数值。
Step5: 使用 tf.data.Dataset 读取数据
Step6: 由于 pd.Series 实现了 __array__ 协议,因此几乎可以在任何使用 np.array 或 tf.Tensor 的地方透明地使用它。
Step7: 随机读取(shuffle)并批量处理数据集。
Step8: 创建并训练模型
Step9: 代替特征列
Step10: 与 tf.data 一起使用时,保存 pd.DataFrame 列结构的最简单方法是将 pd.DataFrame 转换为 dict ,并对该字典进行切片。
|
10,285
|
<ASSISTANT_TASK:>
Python Code:
# NOTE: notebook-style script; `%matplotlib inline` lines are IPython magics
# and bare expressions rely on notebook auto-display.
print('Auch beim Maschinellen Lernen immer wichtig:' + '\n' +'Aufgabe und Daten umfassend kennenlernen')
%matplotlib inline
import numpy as np
# A small 2x3 NumPy array example.
x = np.array([[1, 2, 3], [4, 5, 6]])
print("x:\n{}".format(x))
from scipy import sparse
# create a 2d NumPy array with a diagonal of ones, and zeros everywhere else
eye = np.eye(4)
print("NumPy array:\n{}".format(eye))
# convert the NumPy array to a SciPy sparse matrix in CSR format
# only the non-zero entries are stored
sparse_matrix = sparse.csr_matrix(eye)
print("\nSciPy sparse CSR matrix:\n{}".format(sparse_matrix))
# Same identity matrix in COO format, built from explicit coordinates.
data = np.ones(4)
row_indices = np.arange(4)
col_indices = np.arange(4)
eye_coo = sparse.coo_matrix((data, (row_indices, col_indices)))
print("COO representation:\n{}".format(eye_coo))
%matplotlib inline
import matplotlib.pyplot as plt
# Generate a sequence of numbers from -10 to 10 in 100 steps
x = np.linspace(-10, 10, 100)
# Create a second numpy array using the sin() function
y = np.sin(x)
# The plot function makes a line chart of one array against another
plt.plot(x, y, marker="o", color='brown')
plt.title('Sinus Kurve')
plt.xlabel('x')
plt.ylabel('y')
#plt.legend('sin x','upper left')
%matplotlib inline
import matplotlib.pyplot as plt
# Generate a sequence of numbers from -20 to 20 in 100 steps
x = np.linspace(-20, 20, 100)
# Create a second numpy array using the exp() function
y = np.exp(x)
# The plot function makes a line chart of one array against another
plt.plot(x, y, marker="o", color='green')
plt.title('Exponential Kurve')
plt.xlabel('x')
plt.ylabel('y')
%matplotlib inline
import matplotlib.pyplot as plt
from numpy.random import randn
z = randn(100)
red_dot, = plt.plot(z, "ro", markersize=15)
# Draw a white cross over some of the data.
white_cross, = plt.plot(z[:50], "w+", markeredgewidth=3, markersize=15)
plt.legend([red_dot, (red_dot, white_cross)], ["Attr A", "Attr A+B"])
import pandas as pd
from IPython.display import display
# create a simple dataset of people
data = {'Name': ["John", "Anna", "Peter", "Linda"],
        'Location' : ["New York", "Paris", "Berlin", "London"],
        'Age' : [24, 13, 53, 33]
       }
data_pandas = pd.DataFrame(data)
# IPython.display allows "pretty printing" of dataframes
# in the Jupyter notebook
display(data_pandas)
# One of many possible ways to query the table:
# selecting all rows that have an age column greater than 30
display(data_pandas[data_pandas.Age > 30])
# Print the versions of the core libraries (useful for reproducibility).
import sys
print("Python version: {}".format(sys.version))
import pandas as pd
print("pandas version: {}".format(pd.__version__))
import matplotlib
print("matplotlib version: {}".format(matplotlib.__version__))
import numpy as np
print("NumPy version: {}".format(np.__version__))
import scipy as sp
print("SciPy version: {}".format(sp.__version__))
import IPython
print("IPython version: {}".format(IPython.__version__))
import sklearn
print("scikit-learn version: {}".format(sklearn.__version__))
# Load the iris dataset and inspect its contents.
from sklearn.datasets import load_iris
iris_dataset = load_iris()
print("Keys of iris_dataset: {}".format(iris_dataset.keys()))
print(iris_dataset['DESCR'][:193] + "\n...")
print("Target names: {}".format(iris_dataset['target_names']))
print("Feature names: {}".format(iris_dataset['feature_names']))
print("Type of data: {}".format(type(iris_dataset['data'])))
print("Shape of data: {}".format(iris_dataset['data'].shape))
print("First five rows of data:\n{}".format(iris_dataset['data'][:5]))
print("Type of target: {}".format(type(iris_dataset['target'])))
print("Shape of target: {}".format(iris_dataset['target'].shape))
print("Shape of target: ")
print(iris_dataset['target'].shape)
print("Target:\n{}".format(iris_dataset['target']))
# Split into train/test sets (75/25 by default).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    iris_dataset['data'], iris_dataset['target'], random_state=1)
print("X_train shape: {}".format(X_train.shape))
print("y_train shape: {}".format(y_train.shape))
print("X_test shape: {}".format(X_test.shape))
print("y_test shape: {}".format(y_test.shape))
import mglearn
# create dataframe from data in X_train
# label the columns using the strings in iris_dataset.feature_names
iris_dataframe = pd.DataFrame(X_train, columns=iris_dataset.feature_names)
# create a scatter matrix from the dataframe, color by y_train
# NOTE(review): pd.scatter_matrix is removed in modern pandas
# (use pandas.plotting.scatter_matrix) — confirm the pandas version in use.
pd.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15), marker='o',hist_kwds={'bins': 20}, s=60, alpha=.8, cmap=mglearn.cm3)
# Fit a k-nearest-neighbors classifier (k=3) and classify a new sample.
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
X_new = np.array([[5, 2.9, 1, 0.2]])
print("X_new.shape: {}".format(X_new.shape))
prediction = knn.predict(X_new)
print("Prediction: {}".format(prediction))
print("Predicted target name: {}".format(
    iris_dataset['target_names'][prediction]))
# Evaluate on the held-out test set (two equivalent score computations).
y_pred = knn.predict(X_test)
print("Test set predictions:\n {}".format(y_pred))
print("Test set score: {:.2f}".format(np.mean(y_pred == y_test)))
print("Test set score: {:.2f}".format(knn.score(X_test, y_test)))
# Compact summary: re-split (random_state=0) and refit with k=1.
X_train, X_test, y_train, y_test = train_test_split(
    iris_dataset['data'], iris_dataset['target'], random_state=0)
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
print("Test set score: {:.2f}".format(knn.score(X_test, y_test)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2> Deshalb die Daten sich anzeigen lassen (print() etc.)</h2>
Step2: <h2> Warum setzen wir die Bibliothek Scikit-learn ein ?</h2>
Step3: <h3> Arbeiten mit SciPy </h3>
Step4: <h3> #matplotlib
Step5: <h3> Arbeiten mit pandas</h3>
Step6: Python2 versus Python3
Step7: Eine erste Anwendung
Step8: Wie wird der Erfolg gemessen
Step9: Immer zuerst
Step10: Ihr erstes Machine Learning Modell
Step11: Vorhersagen machen
Step12: Evaluation des Modells
Step13: Zusammenfassung und Ausblick
|
10,286
|
<ASSISTANT_TASK:>
Python Code:
# If you haven't already, make sure you install the `dfcx-scrapi` library
!pip install dfcx-scrapi
from dfcx_scrapi.core.project import Project
# Placeholders: service-account key path, GCP project id, and GCS bucket.
creds_path = '<YOUR_CREDS_PATH>'
project_id = '<YOUR_GCP_PROJECT_ID>'
gcs_bucket = '<YOUR_GCS_BUCKET>'
p = Project(creds_path, project_id=project_id)
# List every Dialogflow CX agent in the project, then export each to GCS.
all_agents = p.list_agents()
# backup_all_agents returns one long-running-operation (LRO) response per agent.
lro_list = p.backup_all_agents(gcs_bucket)
# Example of a single LRO Response
{'name': 'projects/<project name>/locations/global/operations/<lro id>',
 'metadata': {'@type': 'type.googleapis.com/google.protobuf.Struct'},
 'done': True,
 'response': {'@type': 'type.googleapis.com/google.cloud.dialogflow.cx.v3beta1.ExportAgentResponse',
 'agentUri': 'gs://<bucket name>/<agent display name'}}
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imports
Step2: User Inputs
Step3: Extract All Agents from GCP Project
Step5: Backup All Agents to GCS Bucket
|
10,287
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
%matplotlib inline
# Annual oil production in Saudi Arabia, 1996-2007 (millions of tonnes).
data = [446.6565, 454.4733, 455.663 , 423.6322, 456.2713, 440.5881, 425.3325, 485.1494, 506.0482, 526.792 , 514.2689, 494.211 ]
index= pd.date_range(start='1996', end='2008', freq='A')
oildata = pd.Series(data, index)
# Annual air passenger numbers, 1990-2004.
data = [17.5534, 21.86 , 23.8866, 26.9293, 26.8885, 28.8314, 30.0751, 30.9535, 30.1857, 31.5797, 32.5776, 33.4774, 39.0216, 41.3864, 41.5966]
index= pd.date_range(start='1990', end='2005', freq='A')
air = pd.Series(data, index)
# Annual sheep livestock in Asia, 1970-2000.
data = [263.9177, 268.3072, 260.6626, 266.6394, 277.5158, 283.834 , 290.309 , 292.4742, 300.8307, 309.2867, 318.3311, 329.3724, 338.884 , 339.2441, 328.6006, 314.2554, 314.4597, 321.4138, 329.7893, 346.3852, 352.2979, 348.3705, 417.5629, 417.1236, 417.7495, 412.2339, 411.9468, 394.6971, 401.4993, 408.2705, 414.2428]
index= pd.date_range(start='1970', end='2001', freq='A')
livestock2 = pd.Series(data, index)
# Continuation of the livestock series, 2001-2007 (held out for comparison).
data = [407.9979 , 403.4608, 413.8249, 428.105 , 445.3387, 452.9942, 455.7402]
index= pd.date_range(start='2001', end='2008', freq='A')
livestock3 = pd.Series(data, index)
# Quarterly international visitor nights in Australia, 2005-2010.
data = [41.7275, 24.0418, 32.3281, 37.3287, 46.2132, 29.3463, 36.4829, 42.9777, 48.9015, 31.1802, 37.7179, 40.4202, 51.2069, 31.8872, 40.9783, 43.7725, 55.5586, 33.8509, 42.0764, 45.6423, 59.7668, 35.1919, 44.3197, 47.9137]
index= pd.date_range(start='2005', end='2010-Q4', freq='QS-OCT')
aust = pd.Series(data, index)
ax=oildata.plot()
ax.set_xlabel("Year")
ax.set_ylabel("Oil (millions of tonnes)")
print("Figure 7.1: Oil production in Saudi Arabia from 1996 to 2007.")
# Simple exponential smoothing: fixed alphas 0.2 and 0.6, plus estimated.
fit1 = SimpleExpSmoothing(oildata, initialization_method="heuristic").fit(smoothing_level=0.2,optimized=False)
fcast1 = fit1.forecast(3).rename(r'$\alpha=0.2$')
fit2 = SimpleExpSmoothing(oildata, initialization_method="heuristic").fit(smoothing_level=0.6,optimized=False)
fcast2 = fit2.forecast(3).rename(r'$\alpha=0.6$')
fit3 = SimpleExpSmoothing(oildata, initialization_method="estimated").fit()
fcast3 = fit3.forecast(3).rename(r'$\alpha=%s$'%fit3.model.params['smoothing_level'])
plt.figure(figsize=(12, 8))
plt.plot(oildata, marker='o', color='black')
plt.plot(fit1.fittedvalues, marker='o', color='blue')
line1, = plt.plot(fcast1, marker='o', color='blue')
plt.plot(fit2.fittedvalues, marker='o', color='red')
line2, = plt.plot(fcast2, marker='o', color='red')
plt.plot(fit3.fittedvalues, marker='o', color='green')
line3, = plt.plot(fcast3, marker='o', color='green')
plt.legend([line1, line2, line3], [fcast1.name, fcast2.name, fcast3.name])
# Holt's linear, exponential and additive-damped trend methods (air data).
fit1 = Holt(air, initialization_method="estimated").fit(smoothing_level=0.8, smoothing_trend=0.2, optimized=False)
fcast1 = fit1.forecast(5).rename("Holt's linear trend")
fit2 = Holt(air, exponential=True, initialization_method="estimated").fit(smoothing_level=0.8, smoothing_trend=0.2, optimized=False)
fcast2 = fit2.forecast(5).rename("Exponential trend")
fit3 = Holt(air, damped_trend=True, initialization_method="estimated").fit(smoothing_level=0.8, smoothing_trend=0.2)
fcast3 = fit3.forecast(5).rename("Additive damped trend")
plt.figure(figsize=(12, 8))
plt.plot(air, marker='o', color='black')
plt.plot(fit1.fittedvalues, color='blue')
line1, = plt.plot(fcast1, marker='o', color='blue')
plt.plot(fit2.fittedvalues, color='red')
line2, = plt.plot(fcast2, marker='o', color='red')
plt.plot(fit3.fittedvalues, color='green')
line3, = plt.plot(fcast3, marker='o', color='green')
plt.legend([line1, line2, line3], [fcast1.name, fcast2.name, fcast3.name])
# Fit five non-seasonal methods on livestock2 and tabulate parameters + SSE.
fit1 = SimpleExpSmoothing(livestock2, initialization_method="estimated").fit()
fit2 = Holt(livestock2, initialization_method="estimated").fit()
fit3 = Holt(livestock2,exponential=True, initialization_method="estimated").fit()
fit4 = Holt(livestock2,damped_trend=True, initialization_method="estimated").fit(damping_trend=0.98)
fit5 = Holt(livestock2,exponential=True, damped_trend=True, initialization_method="estimated").fit()
params = ['smoothing_level', 'smoothing_trend', 'damping_trend', 'initial_level', 'initial_trend']
results=pd.DataFrame(index=[r"$\alpha$",r"$\beta$",r"$\phi$",r"$l_0$","$b_0$","SSE"] ,columns=['SES', "Holt's","Exponential", "Additive", "Multiplicative"])
results["SES"] = [fit1.params[p] for p in params] + [fit1.sse]
results["Holt's"] = [fit2.params[p] for p in params] + [fit2.sse]
results["Exponential"] = [fit3.params[p] for p in params] + [fit3.sse]
results["Additive"] = [fit4.params[p] for p in params] + [fit4.sse]
results["Multiplicative"] = [fit5.params[p] for p in params] + [fit5.sse]
results
# Plot level and slope components for Holt's linear and additive damped fits.
for fit in [fit2,fit4]:
    pd.DataFrame(np.c_[fit.level,fit.trend]).rename(
        columns={0:'level',1:'slope'}).plot(subplots=True)
plt.show()
print('Figure 7.4: Level and slope components for Holt’s linear trend method and the additive damped trend method.')
# Forecast nine years ahead with each non-seasonal method and plot against
# the held-out livestock3 continuation.
fit1 = SimpleExpSmoothing(livestock2, initialization_method="estimated").fit()
fcast1 = fit1.forecast(9).rename("SES")
fit2 = Holt(livestock2, initialization_method="estimated").fit()
fcast2 = fit2.forecast(9).rename("Holt's")
fit3 = Holt(livestock2, exponential=True, initialization_method="estimated").fit()
fcast3 = fit3.forecast(9).rename("Exponential")
fit4 = Holt(livestock2, damped_trend=True, initialization_method="estimated").fit(damping_trend=0.98)
fcast4 = fit4.forecast(9).rename("Additive Damped")
fit5 = Holt(livestock2, exponential=True, damped_trend=True, initialization_method="estimated").fit()
fcast5 = fit5.forecast(9).rename("Multiplicative Damped")
ax = livestock2.plot(color="black", marker="o", figsize=(12,8))
livestock3.plot(ax=ax, color="black", marker="o", legend=False)
fcast1.plot(ax=ax, color='red', legend=True)
fcast2.plot(ax=ax, color='green', legend=True)
fcast3.plot(ax=ax, color='blue', legend=True)
fcast4.plot(ax=ax, color='cyan', legend=True)
fcast5.plot(ax=ax, color='magenta', legend=True)
ax.set_ylabel('Livestock, sheep in Asia (millions)')
plt.show()
print('Figure 7.5: Forecasting livestock, sheep in Asia: comparing forecasting performance of non-seasonal methods.')
# Holt-Winters seasonal methods (additive/multiplicative, optionally damped),
# fitted on Box-Cox-transformed data.
fit1 = ExponentialSmoothing(aust, seasonal_periods=4, trend='add', seasonal='add', use_boxcox=True, initialization_method="estimated").fit()
fit2 = ExponentialSmoothing(aust, seasonal_periods=4, trend='add', seasonal='mul', use_boxcox=True, initialization_method="estimated").fit()
fit3 = ExponentialSmoothing(aust, seasonal_periods=4, trend='add', seasonal='add', damped_trend=True, use_boxcox=True, initialization_method="estimated").fit()
fit4 = ExponentialSmoothing(aust, seasonal_periods=4, trend='add', seasonal='mul', damped_trend=True, use_boxcox=True, initialization_method="estimated").fit()
results=pd.DataFrame(index=[r"$\alpha$",r"$\beta$",r"$\phi$",r"$\gamma$",r"$l_0$","$b_0$","SSE"])
params = ['smoothing_level', 'smoothing_trend', 'damping_trend', 'smoothing_seasonal', 'initial_level', 'initial_trend']
results["Additive"] = [fit1.params[p] for p in params] + [fit1.sse]
results["Multiplicative"] = [fit2.params[p] for p in params] + [fit2.sse]
results["Additive Dam"] = [fit3.params[p] for p in params] + [fit3.sse]
results["Multiplica Dam"] = [fit4.params[p] for p in params] + [fit4.sse]
ax = aust.plot(figsize=(10,6), marker='o', color='black', title="Forecasts from Holt-Winters' multiplicative method" )
ax.set_ylabel("International visitor night in Australia (millions)")
ax.set_xlabel("Year")
fit1.fittedvalues.plot(ax=ax, style='--', color='red')
fit2.fittedvalues.plot(ax=ax, style='--', color='green')
fit1.forecast(8).rename('Holt-Winters (add-add-seasonal)').plot(ax=ax, style='--', marker='o', color='red', legend=True)
fit2.forecast(8).rename('Holt-Winters (add-mul-seasonal)').plot(ax=ax, style='--', marker='o', color='green', legend=True)
plt.show()
print("Figure 7.6: Forecasting international visitor nights in Australia using Holt-Winters method with both additive and multiplicative seasonality.")
results
# Inspect the internal level/trend/season states of the fitted models.
# NOTE(review): DataFrame.append is removed in pandas 2.x (use pd.concat);
# confirm the pandas version this notebook targets.
fit1 = ExponentialSmoothing(aust, seasonal_periods=4, trend='add', seasonal='add', initialization_method="estimated").fit()
fit2 = ExponentialSmoothing(aust, seasonal_periods=4, trend='add', seasonal='mul', initialization_method="estimated").fit()
df = pd.DataFrame(np.c_[aust, fit1.level, fit1.trend, fit1.season, fit1.fittedvalues],
                  columns=[r'$y_t$',r'$l_t$',r'$b_t$',r'$s_t$',r'$\hat{y}_t$'],index=aust.index)
df.append(fit1.forecast(8).rename(r'$\hat{y}_t$').to_frame(), sort=True)
df = pd.DataFrame(np.c_[aust, fit2.level, fit2.trend, fit2.season, fit2.fittedvalues],
                  columns=[r'$y_t$',r'$l_t$',r'$b_t$',r'$s_t$',r'$\hat{y}_t$'],index=aust.index)
df.append(fit2.forecast(8).rename(r'$\hat{y}_t$').to_frame(), sort=True)
states1 = pd.DataFrame(np.c_[fit1.level, fit1.trend, fit1.season], columns=['level','slope','seasonal'], index=aust.index)
states2 = pd.DataFrame(np.c_[fit2.level, fit2.trend, fit2.season], columns=['level','slope','seasonal'], index=aust.index)
fig, [[ax1, ax4],[ax2, ax5], [ax3, ax6]] = plt.subplots(3, 2, figsize=(12,8))
states1[['level']].plot(ax=ax1)
states1[['slope']].plot(ax=ax2)
states1[['seasonal']].plot(ax=ax3)
states2[['level']].plot(ax=ax4)
states2[['slope']].plot(ax=ax5)
states2[['seasonal']].plot(ax=ax6)
plt.show()
# Simulate 100 future sample paths from the fitted Holt-Winters model.
fit = ExponentialSmoothing(aust, seasonal_periods=4, trend='add', seasonal='mul', initialization_method="estimated").fit()
simulations = fit.simulate(8, repetitions=100, error='mul')
ax = aust.plot(figsize=(10,6), marker='o', color='black',
               title="Forecasts and simulations from Holt-Winters' multiplicative method" )
ax.set_ylabel("International visitor night in Australia (millions)")
ax.set_xlabel("Year")
fit.fittedvalues.plot(ax=ax, style='--', color='green')
simulations.plot(ax=ax, style='-', alpha=0.05, color='grey', legend=False)
fit.forecast(8).rename('Holt-Winters (add-mul-seasonal)').plot(ax=ax, style='--', marker='o', color='green', legend=True)
plt.show()
# Simulations anchored mid-sample, with bootstrapped residuals.
fit = ExponentialSmoothing(aust, seasonal_periods=4, trend='add', seasonal='mul', initialization_method="estimated").fit()
simulations = fit.simulate(16, anchor='2009-01-01', repetitions=100, error='mul', random_errors='bootstrap')
ax = aust.plot(figsize=(10,6), marker='o', color='black',
               title="Forecasts and simulations from Holt-Winters' multiplicative method" )
ax.set_ylabel("International visitor night in Australia (millions)")
ax.set_xlabel("Year")
fit.fittedvalues.plot(ax=ax, style='--', color='green')
simulations.plot(ax=ax, style='-', alpha=0.05, color='grey', legend=False)
fit.forecast(8).rename('Holt-Winters (add-mul-seasonal)').plot(ax=ax, style='--', marker='o', color='green', legend=True)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple Exponential Smoothing
Step2: Here we run three variants of simple exponential smoothing
Step3: Holt's Method
Step4: Seasonally adjusted data
Step5: Plots of Seasonally Adjusted Data
Step6: Comparison
Step7: Holt's Winters Seasonal
Step8: The Internals
Step9: Finally, let's look at the levels, slopes/trends and seasonal components of the models.
Step10: Simulations and Confidence Intervals
Step11: Simulations can also be started at different points in time, and there are multiple options for choosing the random noise.
|
10,288
|
<ASSISTANT_TASK:>
Python Code:
from quantopian.pipeline import Pipeline
def make_pipeline():
    """Return an empty Pipeline (no columns, default universe)."""
    return Pipeline()
pipe = make_pipeline()
from quantopian.research import run_pipeline
# Run for a single day; the result is indexed by (date, asset).
result = run_pipeline(pipe, '2017-01-01', '2017-01-01')
result.head(10)
result.info()
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.factors import BollingerBands,SimpleMovingAverage,EWMA
# A factor definition by itself (not yet attached to a pipeline).
SimpleMovingAverage(inputs = [USEquityPricing.close],
                    window_length = 30)
# Pipeline with a single 30-day simple-moving-average column.
def make_pipeline():
    mean_close_30 = SimpleMovingAverage(inputs = [USEquityPricing.close],
                                        window_length = 30)
    return Pipeline(columns = {
        '30 Day Mean Close':mean_close_30
    })
results = run_pipeline(make_pipeline(),
                       '2017-01-01',
                       '2017-01-01')
results.head(20)
# Add the latest close price alongside the 30-day mean.
def make_pipeline():
    mean_close_30 = SimpleMovingAverage(inputs = [USEquityPricing.close],
                                        window_length = 30)
    latest_close = USEquityPricing.close.latest
    return Pipeline(columns = {
        '30 Day Mean Close':mean_close_30,
        'Latest Close':latest_close
    })
results = run_pipeline(make_pipeline(),
                       '2017-01-01',
                       '2017-01-01')
results.head(10)
# Add a 10-day vs 30-day SMA percent-difference (momentum) column.
def make_pipeline():
    mean_close_10 = SimpleMovingAverage(inputs = [USEquityPricing.close],
                                        window_length = 10)
    mean_close_30 = SimpleMovingAverage(inputs = [USEquityPricing.close],
                                        window_length = 30)
    latest_close = USEquityPricing.close.latest
    percent_difference = (mean_close_10-mean_close_30) / mean_close_30
    return Pipeline(columns = {
        'Percent Difference':percent_difference,
        '30 Day Mean Close':mean_close_30,
        'Latest Close':latest_close
    })
results = run_pipeline(make_pipeline(),
                       '2017-01-01',
                       '2017-01-01')
results.head()
# A standalone filter: latest close price above $20.
last_close_price = USEquityPricing.close.latest
close_price_filter = last_close_price > 20
close_price_filter
# Add a boolean column flagging positive momentum (10d SMA above 30d SMA).
def make_pipeline():
    mean_close_10 = SimpleMovingAverage(inputs = [USEquityPricing.close],
                                        window_length = 10)
    mean_close_30 = SimpleMovingAverage(inputs = [USEquityPricing.close],
                                        window_length = 30)
    latest_close = USEquityPricing.close.latest
    percent_difference = (mean_close_10-mean_close_30) / mean_close_30
    perc_diff_check = percent_difference > 0
    return Pipeline(columns = {
        'Percent Difference':percent_difference,
        '30 Day Mean Close':mean_close_30,
        'Latest Close':latest_close,
        'Positive Percent Diff': perc_diff_check
    })
results = run_pipeline(make_pipeline(),
                       '2017-01-01',
                       '2017-01-01')
results.head()
def make_pipeline():
    """Pipeline of 10/30-day SMA momentum columns, screened to assets
    whose short-term average is above the long-term one."""
    sma_short = SimpleMovingAverage(inputs=[USEquityPricing.close],
                                    window_length=10)
    sma_long = SimpleMovingAverage(inputs=[USEquityPricing.close],
                                   window_length=30)
    close = USEquityPricing.close.latest
    momentum = (sma_short - sma_long) / sma_long
    is_positive = momentum > 0
    columns = {
        'Percent Difference': momentum,
        '30 Day Mean Close': sma_long,
        'Latest Close': close,
        'Positive Percent Diff': is_positive,
    }
    return Pipeline(columns=columns, screen=is_positive)
results = run_pipeline(make_pipeline(),
'2017-01-01',
'2017-01-01')
results.head()
def make_pipeline():
    """SMA pipeline screened with the INVERSE filter (negative percent diff)."""
    sma10 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)
    sma30 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=30)
    close = USEquityPricing.close.latest
    pct_diff = (sma10 - sma30) / sma30
    trending_up = pct_diff > 0
    # ~ negates a pipeline Filter.
    return Pipeline(
        columns={
            'Percent Difference': pct_diff,
            '30 Day Mean Close': sma30,
            'Latest Close': close,
            'Positive Percent Diff': trending_up},
        screen=~trending_up)

results = run_pipeline(make_pipeline(), '2017-01-01', '2017-01-01')
results.head()
def make_pipeline():
    """Screen on the conjunction of two filters: positive diff AND price < $5."""
    sma10 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)
    sma30 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=30)
    close = USEquityPricing.close.latest
    pct_diff = (sma10 - sma30) / sma30
    trending_up = pct_diff > 0
    cheap = close < 5
    # Filters combine with bitwise operators (&, |, ~).
    return Pipeline(
        columns={
            'Percent Difference': pct_diff,
            '30 Day Mean Close': sma30,
            'Latest Close': close,
            'Positive Percent Diff': trending_up},
        screen=trending_up & cheap)

results = run_pipeline(make_pipeline(), '2017-01-01', '2017-01-01')
results.head()
def make_pipeline():
    """Use the cheap-stock filter as a MASK so SMAs only compute over that universe."""
    # Filters used as masks must be built before the factors that take them.
    close = USEquityPricing.close.latest
    cheap = close < 5
    sma10 = SimpleMovingAverage(inputs=[USEquityPricing.close],
                                window_length=10,
                                mask=cheap)
    sma30 = SimpleMovingAverage(inputs=[USEquityPricing.close],
                                window_length=30,
                                mask=cheap)
    pct_diff = (sma10 - sma30) / sma30
    trending_up = pct_diff > 0
    return Pipeline(
        columns={
            'Percent Difference': pct_diff,
            '30 Day Mean Close': sma30,
            'Latest Close': close,
            'Positive Percent Diff': trending_up},
        screen=trending_up)

results = run_pipeline(make_pipeline(), '2017-01-01', '2017-01-01')
results.head()
len(results)
from quantopian.pipeline.data import morningstar
from quantopian.pipeline.classifiers.morningstar import Sector

# Classifiers produce categorical (not numeric) per-asset values.
morningstar_sector = Sector()
exchange = morningstar.share_class_reference.exchange_id.latest
exchange
# .eq() turns a classifier into a Filter selecting NYSE-listed shares.
nyse_filter = exchange.eq('NYS')
def make_pipeline():
    """Masked SMA pipeline, additionally restricted to NYSE listings.

    Relies on the module-level `exchange` classifier defined above.
    """
    # Filters used as masks come first.
    close = USEquityPricing.close.latest
    cheap = close < 5
    # Classifier-derived filter.
    on_nyse = exchange.eq('NYS')
    sma10 = SimpleMovingAverage(inputs=[USEquityPricing.close],
                                window_length=10,
                                mask=cheap)
    sma30 = SimpleMovingAverage(inputs=[USEquityPricing.close],
                                window_length=30,
                                mask=cheap)
    pct_diff = (sma10 - sma30) / sma30
    trending_up = pct_diff > 0
    return Pipeline(
        columns={
            'Percent Difference': pct_diff,
            '30 Day Mean Close': sma30,
            'Latest Close': close,
            'Positive Percent Diff': trending_up},
        screen=trending_up & on_nyse)

results = run_pipeline(make_pipeline(), '2017-01-01', '2017-01-01')
results.head()
len(results)
from quantopian.pipeline import Pipeline
from quantopian.algorithm import attach_pipeline, pipeline_output

def initialize(context):
    """Algorithm entry point: build the pipeline and register it by name."""
    my_pipe = make_pipeline()
    attach_pipeline(my_pipe, 'my_pipeline')

def make_pipeline():
    # Empty pipeline skeleton; add factors/filters/columns here.
    return Pipeline()

def before_trading_start(context, data):
    """Runs once per day before the market opens."""
    # Store our pipeline output DataFrame in context.
    context.output = pipeline_output('my_pipeline')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: Factors
Step3: Combining Factors
Step4: Filters and Screens
Step5: Screens
Step6: Reverse a screen
Step7: Combine Filters
Step8: Masking
Step9: Classifiers
Step10: Classifier Methods
Step11: Pipelines in Quantopian IDE
|
10,289
|
<ASSISTANT_TASK:>
Python Code:
# Toy patient dataset: demographics, blood pressure, cholesterol, and blood
# sodium (Na) / potassium (K) levels; 'drug' is the class label to predict.
data = [
    {'age': 33, 'sex': 'F', 'BP': 'high', 'cholesterol': 'high', 'Na': 0.66, 'K': 0.06, 'drug': 'A'},
    {'age': 77, 'sex': 'F', 'BP': 'high', 'cholesterol': 'normal', 'Na': 0.19, 'K': 0.03, 'drug': 'D'},
    {'age': 88, 'sex': 'M', 'BP': 'normal', 'cholesterol': 'normal', 'Na': 0.80, 'K': 0.05, 'drug': 'B'},
    {'age': 39, 'sex': 'F', 'BP': 'low', 'cholesterol': 'normal', 'Na': 0.19, 'K': 0.02, 'drug': 'C'},
    {'age': 43, 'sex': 'M', 'BP': 'normal', 'cholesterol': 'high', 'Na': 0.36, 'K': 0.03, 'drug': 'D'},
    {'age': 82, 'sex': 'F', 'BP': 'normal', 'cholesterol': 'normal', 'Na': 0.09, 'K': 0.09, 'drug': 'C'},
    {'age': 40, 'sex': 'M', 'BP': 'high', 'cholesterol': 'normal', 'Na': 0.89, 'K': 0.02, 'drug': 'A'},
    {'age': 88, 'sex': 'M', 'BP': 'normal', 'cholesterol': 'normal', 'Na': 0.80, 'K': 0.05, 'drug': 'B'},
    {'age': 29, 'sex': 'F', 'BP': 'high', 'cholesterol': 'normal', 'Na': 0.35, 'K': 0.04, 'drug': 'D'},
    {'age': 53, 'sex': 'F', 'BP': 'normal', 'cholesterol': 'normal', 'Na': 0.54, 'K': 0.06, 'drug': 'C'},
    {'age': 36, 'sex': 'F', 'BP': 'high', 'cholesterol': 'high', 'Na': 0.53, 'K': 0.05, 'drug': 'A'},
    {'age': 63, 'sex': 'M', 'BP': 'low', 'cholesterol': 'high', 'Na': 0.86, 'K': 0.09, 'drug': 'B'},
    {'age': 60, 'sex': 'M', 'BP': 'low', 'cholesterol': 'normal', 'Na': 0.66, 'K': 0.04, 'drug': 'C'},
    {'age': 55, 'sex': 'M', 'BP': 'high', 'cholesterol': 'high', 'Na': 0.82, 'K': 0.04, 'drug': 'B'},
    {'age': 35, 'sex': 'F', 'BP': 'normal', 'cholesterol': 'high', 'Na': 0.27, 'K': 0.03, 'drug': 'D'},
    {'age': 23, 'sex': 'F', 'BP': 'high', 'cholesterol': 'high', 'Na': 0.55, 'K': 0.08, 'drug': 'A'},
    {'age': 49, 'sex': 'F', 'BP': 'low', 'cholesterol': 'normal', 'Na': 0.27, 'K': 0.05, 'drug': 'C'},
    {'age': 27, 'sex': 'M', 'BP': 'normal', 'cholesterol': 'normal', 'Na': 0.77, 'K': 0.02, 'drug': 'B'},
    {'age': 51, 'sex': 'F', 'BP': 'low', 'cholesterol': 'high', 'Na': 0.20, 'K': 0.02, 'drug': 'D'},
    {'age': 38, 'sex': 'M', 'BP': 'high', 'cholesterol': 'normal', 'Na': 0.78, 'K': 0.05, 'drug': 'A'}
]

# Extract the class labels...
target = [d['drug'] for d in data]
target
# ...then strip the label from the feature dicts (pop mutates each in place).
[d.pop('drug') for d in data];
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')

# Pull individual feature columns out of the list-of-dicts.
age = [d['age'] for d in data]
age
sodium = [d['Na'] for d in data]
potassium = [d['K'] for d in data]

# Uncoloured scatter of the two blood measurements.
plt.figure(figsize=(10, 6))
plt.scatter(sodium, potassium)
plt.xlabel('sodium')
plt.ylabel('potassium')

# Map labels 'A'..'D' to ints 0..3 ('A' is ASCII 65) for use as plot colours.
target = [ord(t) - 65 for t in target]
target

# 2x2 grid of pairwise feature scatters, coloured by drug label.
plt.figure(figsize=(14, 10))
plt.subplot(221)
plt.scatter([d['Na'] for d in data], [d['K'] for d in data],
            c=target, s=100)
plt.xlabel('sodium (Na)')
plt.ylabel('potassium (K)')
plt.subplot(222)
plt.scatter([d['age'] for d in data], [d['K'] for d in data],
            c=target, s=100)
plt.xlabel('age')
plt.ylabel('potassium (K)')
plt.subplot(223)
plt.scatter([d['age'] for d in data], [d['Na'] for d in data],
            c=target, s=100)
plt.xlabel('age')
plt.ylabel('sodium (Na)')
from sklearn.feature_extraction import DictVectorizer

# One-hot encode the categorical fields (sex, BP, cholesterol) while numeric
# fields pass through unchanged.
vec = DictVectorizer(sparse=False)
data_pre = vec.fit_transform(data)
vec.get_feature_names()
data_pre[0]

import numpy as np
# OpenCV's ML module expects float32 features and column-vector targets.
data_pre = np.array(data_pre, dtype=np.float32)
target = np.array(target, dtype=np.float32).reshape((-1, 1))
data_pre.shape, target.shape

import sklearn.model_selection as ms
# Hold out 5 samples for testing; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = ms.train_test_split(
    data_pre, target, test_size=5, random_state=42
)
import cv2

# Create an OpenCV decision tree; the train/predict/score calls are left
# commented out here (the scikit-learn tree below is used instead).
dtree = cv2.ml.DTrees_create()
# dtree.train(X_train, cv2.ml.ROW_SAMPLE, y_train)
# y_pred = dtree.predict(X_test)
# from sklearn import metrics
# metrics.accuracy_score(y_test, dtree.predict(X_test))
# metrics.accuracy_score(y_train, dtree.predict(X_train))
from sklearn import tree

# Fit a default (Gini-impurity) decision tree and score it on both splits.
dtc = tree.DecisionTreeClassifier()
dtc.fit(X_train, y_train)
dtc.score(X_train, y_train)
dtc.score(X_test, y_test)

# Export the fitted tree to GraphViz .dot format for visualisation.
with open("tree.dot", 'w') as f:
    f = tree.export_graphviz(dtc, out_file=f,
                             feature_names=vec.get_feature_names(),
                             class_names=['A', 'B', 'C', 'D'])

# Feature importances, shown as a horizontal bar chart.
dtc.feature_importances_
plt.figure(figsize=(12, 6))
plt.barh(range(10), dtc.feature_importances_, align='center', tick_label=vec.get_feature_names())

# Same tree but splitting on information gain (entropy) instead of Gini.
dtce = tree.DecisionTreeClassifier(criterion='entropy')
dtce.fit(X_train, y_train)
dtce.score(X_train, y_train)
dtce.score(X_test, y_test)
with open("tree.dot", 'w') as f:
    f = tree.export_graphviz(dtce, out_file=f,
                             feature_names=vec.get_feature_names(),
                             class_names=['A', 'B', 'C', 'D'])

# Limit complexity (at most 6 leaves) to curb over-fitting.
dtc0 = tree.DecisionTreeClassifier(criterion='entropy', max_leaf_nodes=6)
dtc0.fit(X_train, y_train)
dtc0.score(X_train, y_train)
dtc0.score(X_test, y_test)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Understanding the task by understanding the data
Step2: Then remove the 'drug' entry from all the dictionaries
Step3: Sweet! Now let's look at the data
Step4: But, what we really want is to color the data points according to their target labels
Step5: Preprocessing the data
Step6: Convert to 32-bit floating point numbers in order to make OpenCV happy
Step7: Then split data into training and test sets
Step8: Building the decision tree
Step9: Then train the model
Step10: Predict some values
Step11: Calculate the score on the training and test sets
Step12: Visualizing a trained decision tree
Step13: The model is trained by calling fit
Step14: Now, here's the cool thing
Step15: Then, back on the command line, you can use GraphViz to turn "tree.dot" into (for example) a PNG file
Step16: If we remind ourselves of the feature names, it will become clear which features seem to be the most important. A plot might be most informative.
Step17: Understanding decision rules
Step18: Controlling the complexity of decision trees
|
10,290
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
import pandas as pd
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
import utils
%matplotlib inline
%load_ext autoreload
%autoreload 2
CSV_PATH = '../../data/unique_counts_semi.csv'
# load data
initial_df = utils.load_queries(CSV_PATH)
# filter out queries with length less than 2 characters long
start_num = len(initial_df)
df = utils.clean_queries(initial_df)
print("{} distinct queries after stripping {} queries of length 1".format(len(df), start_num-len(df)))
print("Yielding a total of {} query occurrences.".format(df['countqstring'].sum()))
df.head(10)

# Cumulative coverage: what share of all query occurrences is accounted for by
# queries whose frequency is <= n, for n in 1..199.
total = df['countqstring'].sum()
fig, ax = plt.subplots(ncols=2, figsize=(20, 8))
cum_coverage = pd.Series(range(1,200)).apply(lambda n: df[df['countqstring'] <= n]['countqstring'].sum())/total
cum_coverage = cum_coverage*100
cum_coverage = cum_coverage.round(2)
# plot the cumulative coverage
cum_coverage.plot(ax=ax[0])
ax[0].set_xlabel('Query Frequency')
ax[0].set_ylabel('Cumulative Coverage (%)')
# see if it looks Zipfian. ie plot a log-log graph of query frequency against query rank
df.plot(ax=ax[1], y='countqstring', use_index=True, logx=True, logy=True)
ax[1].set_xlabel('Rank of Query (ie most frequent to least frequent)')
ax[1].set_ylabel('Query Frequency');

# Tabulate the first ten coverage values.
print("Freq  Cumulative Coverage")
for i, val in enumerate(cum_coverage[:10].get_values()):
    print("{:>2}    {:0<5}%".format(i+1, val))

# Per-annotator results, then pairwise inter-annotator agreement (IAA).
print(utils.get_user_results('annotator1'))
print('\n')
print(utils.get_user_results('annotator2'))
print('\n')
print(utils.get_user_results('martin'))
user_pairs = [
    ['annotator1', 'annotator2'],
    ['martin', 'annotator1'],
    ['martin', 'annotator2'],
]
results = utils.do_iaa_pairs(user_pairs)
utils.print_iaa_pairs(results, user_pairs)
# Re-run agreement for questions 2-3 at the coarse label level.
results = utils.do_iaa_pairs(user_pairs, questions=(2,3), level='coarse')
utils.print_iaa_pairs(results, user_pairs)
# Per-question agreement across all three annotators.
for question in (1,2,3):
    print(utils.show_agreement(question, ['annotator1', 'annotator2', 'martin']))
    print('\n')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Do some cleanup
Step2: Query Frequency Analysis
Step3: The frequency of queries drops off pretty quickly, suggesting a long tail of low frequency queries. Let's get a sense of this by looking at the cumulative coverage of queries with frequencies between 1 and 10.
Step4: i.e. queries with a frequency of 1 account for about 30% of queries, queries with a frequency of 2 or less account for 48%, 3 or less account for 58%, etc.
Step5: Comments
Step6: These scores are not particularly high. We're struggling to get into even 'tentative' reliability land. We're probably going to need to do some disagreement analysis to work out what's going on.
Step7: Agreement has improved, especially for Q2. Q3, however, is still a bit on the low side.
|
10,291
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
from stemgraphic import stem_graphic

# Load the iris data and show summary statistics.
df = pd.read_csv('../iris.csv')
df.describe()
# Stem-and-leaf plot of a single numeric column.
stem_graphic(df['sepal_length']);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load a data frame
Step2: Select a column, or pass the whole dataframe if you want stem_graphic to select the first numerical column.
|
10,292
|
<ASSISTANT_TASK:>
Python Code:
from PyUGC import *
from PyUGC.Stream import UGC
from PyUGC.Base import OGDC
from PyUGC import Engine
from PyUGC import FileParser
from PyUGC import DataExchange
import datasource
#help(UGC)
#help(OGDC)
#help(datasource)
import os
basepath = os.path.join(os.getcwd(),"../data")
print("Data path: ", basepath)
file1 = basepath + u"/Shape/countries.shp"
print("Data file: ", file1)
file2 = basepath + u"/Raster/astronaut(CMYK)_32.tif"
print("Data file: ", file2)
file3 = basepath + u"/Grid/grid_Int32.grd"
print("Data file: ", file3)
datapath_out = basepath + u"/GIScript_Test.udb"
print("Output UDB: ",datapath_out)
def Import_Test():
    """Create a UDB datasource and import the sample vector, raster and grid
    files into it (paths come from the module-level file1/file2/file3)."""
    print("Export to UDB: ",datapath_out)
    ds = datasource.CreateDatasource(UGC.UDB,datapath_out)
    datasource.ImportVector(file1,ds)
    datasource.ImportRaster(file2,ds)
    datasource.ImportGrid(file3,ds)
    # Close and release the datasource handle when done.
    ds.Close()
    del ds
    print("Finished.")

try:
    Import_Test()
except Exception as ex:
    # Report (rather than propagate) any import failure.
    print(ex)
!ls -l -h ../data/GIScript_Test.*
!rm ../data/GIScript_Test.*
!ls -l -h ../data/GIScript_Test.*
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2、使用Python的help(...)查看库的元数据信息获得帮助。
Step2: 3、设置测试数据目录。
Step3: 4、导入数据的测试函数。
Step4: 5、运行这个测试。
Step5: (三)查看生成的数据源文件UDB。
Step6: <font color="red">删除生成的测试文件。注意,不要误删其它文件!</font>
Step7: 再次查看目录,文件是否存在。
|
10,293
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Load the hourly bike-sharing dataset and preview the first ten days.
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')

# One-hot encode the categorical columns, then drop the originals plus
# fields we will not use as inputs.
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
    dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
    rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
                  'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()

# Standardize (zero mean, unit variance) the continuous variables.
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
    mean, std = data[each].mean(), data[each].std()
    scaled_features[each] = [mean, std]
    data.loc[:, each] = (data[each] - mean)/std

# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]

# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]

# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
    """One-hidden-layer feed-forward network trained with plain batch
    gradient descent.

    Hidden units use a sigmoid activation; the single output unit is
    linear (identity), making the network suitable for regression.
    """

    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Layer sizes.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Weights drawn from N(0, 1/sqrt(fan_in)) to keep activations well scaled.
        self.weights_input_to_hidden = np.random.normal(
            0.0, self.input_nodes ** -0.5,
            (self.input_nodes, self.hidden_nodes))
        self.weights_hidden_to_output = np.random.normal(
            0.0, self.hidden_nodes ** -0.5,
            (self.hidden_nodes, self.output_nodes))

        self.lr = learning_rate
        # Sigmoid activation for the hidden layer.
        self.activation_function = lambda x: 1 / (1 + np.exp(-x))

    def train(self, features, targets):
        """Run one gradient-descent step over a batch.

        Arguments
        ---------
        features: 2D array, each row is one data record, each column is a feature
        targets: 1D array of target values
        """
        n_records = features.shape[0]
        grad_input_hidden = np.zeros(self.weights_input_to_hidden.shape)
        grad_hidden_output = np.zeros(self.weights_hidden_to_output.shape)

        for X, y in zip(features, targets):
            # Forward pass.
            hidden_activations = self.activation_function(
                np.dot(X, self.weights_input_to_hidden))
            prediction = np.dot(hidden_activations,
                                self.weights_hidden_to_output)  # linear output

            # Backward pass.
            error = y - prediction
            output_error_term = error  # derivative of identity output is 1
            hidden_error = np.dot(self.weights_hidden_to_output, error)
            hidden_error_term = hidden_error * hidden_activations * (1 - hidden_activations)

            # Accumulate weight steps for this batch.
            grad_input_hidden += hidden_error_term * X[:, None]
            grad_hidden_output += output_error_term * hidden_activations[:, None]

        # Apply the averaged gradient-descent update.
        self.weights_hidden_to_output += self.lr * grad_hidden_output / n_records
        self.weights_input_to_hidden += self.lr * grad_input_hidden / n_records

    def run(self, features):
        """Forward pass: return the network's prediction for `features`."""
        hidden_activations = self.activation_function(
            np.dot(features, self.weights_input_to_hidden))
        return np.dot(hidden_activations, self.weights_hidden_to_output)
def MSE(y, Y):
    """Mean squared error between predictions `y` and targets `Y`."""
    diff = y - Y
    return np.mean(diff * diff)
import unittest

# Fixed inputs/weights for deterministic unit tests of the network maths.
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
                       [0.4, 0.5],
                       [-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
                       [-0.1]])

class TestMethods(unittest.TestCase):
    """Sanity checks for data loading and NeuralNetwork behaviour."""

    ##########
    # Unit tests for data loading
    ##########

    def test_data_path(self):
        # Test that file path to dataset has been unaltered
        self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')

    def test_data_loaded(self):
        # Test that data frame loaded
        self.assertTrue(isinstance(rides, pd.DataFrame))

    ##########
    # Unit tests for network functionality
    ##########

    def test_activation(self):
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Test that the activation function is a sigmoid
        self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))

    def test_train(self):
        # Test that weights are updated correctly on training
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        network.train(inputs, targets)
        self.assertTrue(np.allclose(network.weights_hidden_to_output,
                                    np.array([[ 0.37275328],
                                              [-0.03172939]])))
        self.assertTrue(np.allclose(network.weights_input_to_hidden,
                                    np.array([[ 0.10562014, -0.20185996],
                                              [0.39775194, 0.50074398],
                                              [-0.29887597, 0.19962801]])))

    def test_run(self):
        # Test correctness of run method
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        self.assertTrue(np.allclose(network.run(inputs), 0.09998924))

# Run the suite inline (notebook style).
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
def is_overfitting(train_losses, val_losses):
    """Heuristic early-stop check over regularly-sampled loss histories.

    Flags over-fitting when either (a) the mean of the last three validation
    losses exceeds the mean of the previous three (validation loss trending
    up, ignoring a one-time random blip), or (b) the gap between validation
    and training loss is widening by the same 3-vs-3 comparison.  Requires at
    least six samples before it will report anything.
    """
    if len(val_losses) < 6:
        return False

    val = np.array(val_losses)
    recent_val = val[-3:].mean()
    earlier_val = val[-6:-3].mean()
    if recent_val > earlier_val:
        # Validation loss is increasing.
        return True

    trn = np.array(train_losses)
    recent_gap = recent_val - trn[-3:].mean()
    earlier_gap = earlier_val - trn[-6:-3].mean()
    # True when the train/validation gap is growing.
    return recent_gap > earlier_gap
import sys

### Set the hyperparameters here ###
iterations = 10000
learning_rates_list = [2, 1, 0.5, 0.1, 0.05, 0.001]
hidden_nodes_list = [5, 10, 15, 20, 25, 30, 40, 50, 75, 100]
output_nodes = 1

# Coarse grid search over every (learning rate, hidden-node count) pair;
# each configuration is abandoned early if the over-fitting heuristic trips.
N_i = train_features.shape[1]
for learning_rate in learning_rates_list:
    for hidden_nodes in hidden_nodes_list:
        print("iterations \t learning_rate \t hidden_nodes \t train_loss \t val_loss")
        network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
        train_losses = []
        val_losses = []
        for ii in range(iterations):
            # Go through a random batch of 128 records from the training data set
            batch = np.random.choice(train_features.index, size=128)
            # .loc replaces the deprecated (removed in pandas 1.0) DataFrame.ix.
            X, y = train_features.loc[batch].values, train_targets.loc[batch, 'cnt']
            network.train(X, y)

            # check progress every 500 iterations
            if ii % 500 == 0:
                current_train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
                current_val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
                train_losses.append(current_train_loss)
                val_losses.append(current_val_loss)

                # print the progress thus far
                output = "{} \t\t {} \t\t {} \t\t {:2.3f} \t\t {:2.3f}"
                print(output.format(ii, learning_rate, hidden_nodes, current_train_loss, current_val_loss))

                # if these settings seem to be overfitting
                if is_overfitting(train_losses, val_losses):
                    print("Over-fitting detected")
                    break  # move on to the next hyperparameter settings
                else:
                    continue  # keep going on next 500 iterations
### Set the hyperparameters here ###
iterations = 10000
learning_rates_list = [1, 1.2, 0.8, 1, 1.2, 0.8, 1, 1.2, 0.8, 1, 1.2, 0.8, 1, 1.2, 0.8,
                       0.4, 0.5, 0.6, 0.4, 0.5, 0.6, 0.4, 0.5, 0.6, 0.4, 0.5, 0.6, 0.4, 0.5, 0.6, 0.4, 0.5, 0.6]
hidden_nodes_list = [8, 8, 8, 10, 10, 10, 13, 13, 13, 15, 15, 15, 17, 17, 17,
                     18, 18, 18, 20, 20, 20, 25, 25, 25, 30, 30, 30, 40, 40, 40, 50, 50, 50]
output_nodes = 1

# The two lists are paired element-wise, so they must be equally long.
if(len(learning_rates_list) != len(hidden_nodes_list)):
    print("your configuration is wrong, you probably want to abort...")

# Focused search: repeated (lr, hidden) pairs around the promising region
# found by the coarse search above.
N_i = train_features.shape[1]
for learning_rate, hidden_nodes in zip(learning_rates_list, hidden_nodes_list):
    print("iterations \t learning_rate \t hidden_nodes \t train_loss \t val_loss")
    network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
    train_losses = []
    val_losses = []
    for ii in range(iterations):
        # Go through a random batch of 128 records from the training data set
        batch = np.random.choice(train_features.index, size=128)
        # .loc replaces the deprecated (removed in pandas 1.0) DataFrame.ix.
        X, y = train_features.loc[batch].values, train_targets.loc[batch, 'cnt']
        network.train(X, y)

        # check progress every 500 iterations
        if ii % 500 == 0:
            current_train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
            current_val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
            train_losses.append(current_train_loss)
            val_losses.append(current_val_loss)

            # print the progress thus far
            output = "{} \t\t {} \t\t {} \t\t {:2.3f} \t\t {:2.3f}"
            print(output.format(ii, learning_rate, hidden_nodes, current_train_loss, current_val_loss))

            # if these settings seem to be overfitting
            if is_overfitting(train_losses, val_losses):
                print("Over-fitting detected")
                break  # move on to the next hyperparameter settings
            else:
                continue  # keep going on next 500 iterations
import sys

### Set the hyperparameters here ###
iterations = 5000
learning_rate = 0.6
hidden_nodes = 10
output_nodes = 1

# Train the chosen configuration, tracking train/validation loss per step.
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

losses = {'train': [], 'validation': []}
for ii in range(iterations):
    # Go through a random batch of 128 records from the training data set
    batch = np.random.choice(train_features.index, size=128)
    # .loc replaces the deprecated (removed in pandas 1.0) DataFrame.ix.
    X, y = train_features.loc[batch].values, train_targets.loc[batch, 'cnt']
    network.train(X, y)

    # Printing out the training progress
    train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
    sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
                     + "% ... Training loss: " + str(train_loss)[:5] \
                     + " ... Validation loss: " + str(val_loss)[:5])
    sys.stdout.flush()

    losses['train'].append(train_loss)
    losses['validation'].append(val_loss)

plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim([0, 2])  # limit y range just so the lower end is clearer
fig, ax = plt.subplots(figsize=(8,4))

# Un-scale the predictions back to raw ride counts before plotting.
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()

# Label the x axis with dates; .loc replaces the removed DataFrame.ix indexer.
dates = pd.to_datetime(rides.loc[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Unit tests
Step9: Training the network
Step10: Hyperparameter searching part 2
Step11: Selecting Hyperparameters
Step12: Check out your predictions
|
10,294
|
<ASSISTANT_TASK:>
Python Code:
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import helper

# Load the Simpsons "Moe's Tavern" script corpus.
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
view_sentence_range = (0, 10)

"""DON'T MODIFY ANYTHING IN THIS CELL"""
import numpy as np

# Basic corpus statistics: vocabulary size, scenes, lines, words per line.
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """Create lookup tables for vocabulary.

    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Enumerate the unique words once; the two dicts are exact inverses.
    # (A previous version also built both dicts word-by-word in a loop and
    # then immediately overwrote them with these comprehensions -- that
    # dead code has been removed; the returned mappings are unchanged.)
    vocab = set(text)
    vocab_to_int = {c: i for i, c in enumerate(vocab)}
    int_to_vocab = dict(enumerate(vocab))
    return vocab_to_int, int_to_vocab
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_create_lookup_tables(create_lookup_tables)
def token_lookup():
    """Generate a dict to turn punctuation into a token.

    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Tokens are wrapped in '||' so they cannot collide with real words.
    return {
        ".": "||Period||",
        ",": "||Comma||",
        "\"": "||Quotation_Mark||",
        ";": "||Semicolon||",
        "!": "||Exclamation_Mark||",
        "?": "||Question_Mark||",
        "(": "||Left_Parentheses||",
        ")": "||Right_Parentheses||",
        "--": "||Dash||",
        "\n": "||Return||",
    }
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_tokenize(token_lookup)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)

"""DON'T MODIFY ANYTHING IN THIS CELL"""
import helper
import numpy as np
import problem_unittests as tests

# Reload the tokenized corpus and lookup tables from disk.
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()

"""DON'T MODIFY ANYTHING IN THIS CELL"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf

# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))

# Check for a GPU
if not tf.test.gpu_device_name():
    warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
    """Create TF Placeholders for input, targets, and learning rate.

    :return: Tuple (input, targets, learning rate)
    """
    # Shapes are [batch, sequence]; both dims left None so batch size and
    # sequence length can differ between training and generation.
    _input = tf.placeholder(tf.int32,shape=[None, None],name="input")
    _targets = tf.placeholder(tf.int32,shape=[None, None],name='targets')
    # NOTE(review): the placeholder name 'learing_rate' is misspelled; kept
    # as-is because graph consumers may fetch the tensor by this name.
    _lr = tf.placeholder(tf.float32,name='learing_rate')
    return _input, _targets, _lr
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_inputs(get_inputs)
def get_init_cell(batch_size, rnn_size):
    """Create an RNN Cell and initialize it.

    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    #print(batch_size,rnn_size)
    num_layers = 2
    lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    #cell = tf.contrib.rnn.MultiRNNCell([lstm] * num_layers)
    # keep_prob of 1 makes the dropout wrapper a no-op; lower it to regularize.
    keep_prob = 1 #0.5
    drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
    cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)
    initial_state = cell.zero_state(batch_size, tf.float32)
    # Name the state tensor so it can be fetched from a loaded graph.
    initial_state = tf.identity(initial_state, name='initial_state')
    return cell, initial_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
    """Create embedding for <input_data>.

    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    #embedding = tf.get_variable("embedding", [vocab_size, embed_dim])
    # Trainable embedding matrix, initialized uniformly in [-1, 1).
    embedding = tf.Variable(tf.random_uniform([vocab_size, embed_dim],-1,1))
    _inputs = tf.nn.embedding_lookup(embedding, input_data)
    return _inputs
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell.

    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Expose the final state under a stable name for later graph loading.
    return outputs, tf.identity(state, name='final_state')
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_rnn(build_rnn)
def build_nn(cell, rnn_size, input_data, vocab_size):
    """
    Assemble embedding -> RNN -> fully connected output layer.

    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :return: Tuple (Logits, FinalState)
    """
    # NOTE(review): the embedding dimension is tied to rnn_size here.
    embedded = get_embed(input_data, vocab_size, rnn_size)
    outputs, final_state = build_rnn(cell, embedded)
    # Linear projection (no activation): raw logits over the vocabulary.
    logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None)
    return logits, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target sequences.

    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Numpy array of shape (n_batches, 2, batch_size, seq_length),
             where [:, 0] holds inputs and [:, 1] holds targets (the inputs
             shifted one word into the future).
    """
    # Number of full batches; trailing words that do not fill one are dropped.
    # (Equivalent to the old int(int(len/b)/s): floor(floor(L/b)/s) == L//(b*s).)
    n_batches = len(int_text) // (batch_size * seq_length)
    n_words = n_batches * batch_size * seq_length

    # Targets are the inputs shifted one step into the future.
    xdata = np.array(int_text[:n_words])
    ydata = np.array(int_text[1:n_words + 1])

    # Lay each batch element's text out row-wise, then cut along the time
    # axis so consecutive batches continue each row's text — required for
    # carrying the RNN state across batches.
    x_batches = np.split(xdata.reshape(batch_size, -1), n_batches, 1)
    y_batches = np.split(ydata.reshape(batch_size, -1), n_batches, 1)
    return np.array(list(zip(x_batches, y_batches)))
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_batches(get_batches)
# Training hyperparameters for the TV-script RNN (values chosen by the
# notebook author; presumably hand-tuned for this dataset — not verified).
# Number of Epochs
num_epochs = 80
# Batch Size
batch_size = 23
# RNN Size
rnn_size = 256
# Sequence Length
seq_length = 33
# Learning Rate
learning_rate = 0.005
# Show stats for every n number of batches
show_every_n_batches = 100
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
save_dir = './save'
DON'T MODIFY ANYTHING IN THIS CELL
from tensorflow.contrib import seq2seq
# Build the training graph: placeholders -> embedding/RNN -> softmax,
# sequence loss, and an Adam optimizer with gradient clipping.
# NOTE(review): indentation under the `with` block was stripped by the
# dataset extraction; the original notebook cell was indented.
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
# Dynamic shape: [batch, seq_length] — both dims unknown until feed time.
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words (named so it can be fetched after load)
probs = tf.nn.softmax(logits, name='probs')
# Loss function: average cross-entropy over all time steps (uniform weights)
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping: clip each gradient element to [-1, 1] to stabilise training
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
import datetime
import sys
DON'T MODIFY ANYTHING IN THIS CELL
# Training loop: run all batches for num_epochs epochs, carrying the RNN
# state across batches within an epoch, then save the model.
# NOTE(review): indentation was stripped by the dataset extraction.
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
# Wall-clock timers: all_start for the whole run, start per report interval.
all_start = datetime.datetime.now()
start = datetime.datetime.now()
for epoch_i in range(num_epochs):
# Reset the RNN state at the start of each epoch.
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
# Returned `state` is fed back in, so text continuity spans batches.
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
end = datetime.datetime.now()
sys.stdout.write('\rEpoch {:>3} Batch {:>4}/{} train_loss = {:.3f} time = {}'.format(
epoch_i,
batch_i,
len(batches),
train_loss,
(end-start)))
#
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f} time = {}'.format(
epoch_i,
batch_i,
len(batches),
train_loss,
(end-start)))
start = datetime.datetime.now()
# Save Model
all_end = datetime.datetime.now()
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('\nModel Trained and Saved Time {} train_loss = {:.3f}'.format(all_end-all_start,train_loss))
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensors
    from `loaded_graph`.

    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # These names match the tf.identity / placeholder names set at build time.
    tensor_names = ("input:0", "initial_state:0", "final_state:0", "probs:0")
    return tuple(loaded_graph.get_tensor_by_name(name) for name in tensor_names)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_tensors(get_tensors)
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text, sampled from the three
    most probable candidates.

    :param probabilities: Probabilities of the next word (1-D numpy array)
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    keep = 3
    # Zero out everything except the `keep` largest probabilities.
    # (This mutates the caller's array in place, as the original did.)
    probabilities[np.argsort(probabilities)[:-keep]] = 0
    renormed = probabilities / np.sum(probabilities)
    word_id = np.random.choice(len(int_to_vocab), 1, p=renormed)[0]
    return int_to_vocab[word_id]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_pick_word(pick_word)
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
# Generation: restore the saved graph, then repeatedly feed the last
# seq_length generated words and sample the next one.
# NOTE(review): indentation was stripped by the dataset extraction.
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup: seed with the prime word as a speaker tag.
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input: at most the last seq_length words, as a batch of one.
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
# NOTE(review): this indexes the batch axis of a [1, seq, vocab] output;
# presumably probabilities[0][dyn_seq_length-1] was intended — verify.
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens: map punctuation placeholders back to real punctuation.
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TV Script Generation
Step3: Explore the Data
Step7: Implement Preprocessing Functions
Step10: Tokenize Punctuation
Step12: Preprocess all the data and save it
Step14: Check Point
Step16: Build the Neural Network
Step19: Input
Step22: Build RNN Cell and Initialize
Step25: Word Embedding
Step28: Build RNN
Step31: Build the Neural Network
Step34: Batches
Step36: Neural Network Training
Step38: Build the Graph
Step41: Train
Step43: Save Parameters
Step45: Checkpoint
Step48: Implement Generate Functions
Step51: Choose Word
Step53: Generate TV Script
|
10,295
|
<ASSISTANT_TASK:>
Python Code:
# Interactive script: delete Chronos jobs named after sample ids found in the
# header of a tab-separated input file. Performs network DELETEs with TLS
# verification disabled — intended for a trusted admin host only.
control=input()
import getpass
password=getpass.getpass()
# Delete Jobs
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning) # suppress warnings
# Figure out samples from the header of the input file
with open('inputdata_workshop.xls', 'r') as f: header = f.readline()
# Strip one char from each end of every column name, keep the first 5 chars,
# and de-duplicate — presumably the column names are quoted; verify format.
samples = list(set(map(lambda s: s[1:-1][:5],header.split("\t"))))
samples.remove('BLANK')
# Delete all of them
for s in samples:
# Job naming convention: 'cv-' prefix, dots replaced by underscores.
url="https://admin:"+password+"@"+control+"/chronos/scheduler/job/cv-"+s.replace(".", "_")
response=requests.delete(url, verify=False)
print(s + " HTTP response code: " + str(response.status_code))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Insert your admin password
Step2: Deleting script
|
10,296
|
<ASSISTANT_TASK:>
Python Code:
# Plot the W5 FITS image in its WCS projection, with equatorial (green) and
# galactic (cyan) coordinate grids, plus contours from a Herschel-SPIRE map
# reprojected onto the same axes.
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.wcs import WCS
from astropy.visualization import (MinMaxInterval,
LogStretch,
ImageNormalize)
%matplotlib inline
hdu = fits.open('./data/w5.fits')[0]
wcs = WCS(hdu.header)
# SPIRE 350-micron map used only for the contour overlay.
hdu2 = fits.open('./data/0259p6031_1342192088_SpirePhoto_L20_PMP350_SPG14.0.fits.gz')[1]
# Log stretch over the full data range for display.
norm = ImageNormalize(hdu.data, interval=MinMaxInterval(),
stretch=LogStretch())
fig = plt.figure(figsize=(8,8))
ax = plt.subplot(projection=wcs)
overlay = ax.get_coords_overlay('galactic')
plt.imshow(hdu.data, norm=norm, origin="lower", cmap='Greys_r')
ax.coords['ra'].set_ticks(color='green')
ax.coords['dec'].set_ticks(color='green')
ax.coords['ra'].set_axislabel('Right Ascension')
ax.coords['dec'].set_axislabel('Declination')
ax.coords.grid(color='green', linestyle='solid', alpha=1.0)
overlay['l'].set_ticks(color='cyan')
overlay['b'].set_ticks(color='cyan')
overlay['l'].set_axislabel('Galactic Longitude')
overlay['b'].set_axislabel('Galactic Latitude')
overlay.grid(color='cyan', linestyle='solid', alpha=1.0)
# Contours drawn in hdu2's own WCS frame, transformed onto these axes.
ax.contour(hdu2.data, transform=ax.get_transform(WCS(hdu2.header)),
levels=[0.7,1.4,3], colors='white');
# Build Lupton et al. (2004) style RGB composites: first from SDSS g/r/i
# frames, then from three Herschel-SPIRE bands, reprojecting all channels
# onto a common pixel grid before combining.
import numpy as np
from astropy.visualization import make_lupton_rgb
from astropy.io import fits
from reproject import reproject_interp
# Read in the three images downloaded from here:
g = fits.open('http://dr13.sdss.org/sas/dr13/eboss/photoObj/frames/301/1737/5/frame-g-001737-5-0039.fits.bz2')[0]
r = fits.open('http://dr13.sdss.org/sas/dr13/eboss/photoObj/frames/301/1737/5/frame-r-001737-5-0039.fits.bz2')[0]
i = fits.open('http://dr13.sdss.org/sas/dr13/eboss/photoObj/frames/301/1737/5/frame-i-001737-5-0039.fits.bz2')[0]
# remap r and i onto g's pixel grid so the channels align
r_new, r_mask = reproject_interp(r, g.header)
i_new, i_mask = reproject_interp(i, g.header)
# zero out the unmapped values (pixels outside the overlap footprint)
i_new[np.logical_not(i_mask)] = 0
r_new[np.logical_not(r_mask)] = 0
# red=i, green=r, blue=g
# make a file with the default scaling
rgb_default = make_lupton_rgb(i_new, r_new, g.data, filename="ngc6976-default.jpeg")
# this scaling is very similar to the one used in Lupton et al. (2004)
rgb = make_lupton_rgb(i_new, r_new, g.data, Q=10, stretch=0.5, filename="ngc6976.jpeg")
# Same recipe for SPIRE 250/350/500-micron maps, using the 500-micron
# (coarsest) grid as the common projection.
w5_250 = fits.open('./data/0259p6031_1342192088_SpirePhoto_L20_PMP250_SPG14.0.fits.gz')[1]
w5_350 = fits.open('./data/0259p6031_1342192088_SpirePhoto_L20_PMP350_SPG14.0.fits.gz')[1]
w5_500 = fits.open('./data/0259p6031_1342192088_SpirePhoto_L20_PMP500_SPG14.0.fits.gz')[1]
im250, msk250 = reproject_interp(w5_250, w5_500.header)
im350, msk350 = reproject_interp(w5_350, w5_500.header)
# zero out the unmapped values
im250[np.logical_not(msk250)] = 0
im350[np.logical_not(msk350)] = 0
rgb_w5_default = make_lupton_rgb(im250, im350, w5_500.data, filename="w5-default.jpeg")
rgb_w5 = make_lupton_rgb(im250, im350, w5_500.data, Q=10, stretch=0.5, filename="w5.jpeg")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. RGB-3-color images
Step2: 3. RGB colors of Herschel-SPIRE images
Step3:
|
10,297
|
<ASSISTANT_TASK:>
Python Code:
!head -n 2 ../data/yelp/yelp_training_set_review.json
# GraphLab Create notebook (Python 2): load Yelp reviews, engineer a binary
# sentiment label and bag-of-words features, train a logistic classifier,
# and inspect predictions for one business.
reviews = gl.SFrame.read_csv('../data/yelp/yelp_training_set_review.json', header = False)
reviews
reviews[0]
# Each row was parsed as one JSON dict in column X1; unpack it into columns.
reviews=reviews.unpack('X1','')
reviews
# The 'votes' column is itself a dict — unpack it too.
reviews = reviews.unpack('votes', '')
reviews
reviews.show()
gl.canvas.set_target('ipynb')
reviews['stars'].show(view = 'Categorical')
#ignore all 3* reviews (ambiguous sentiment)
reviews = reviews[reviews['stars'] != 3]
#positive sentiment = 4* or 5* reviews
reviews['sentiment'] = reviews['stars'] >=4
reviews['sentiment'].show(view = 'Categorical')
# Bag-of-words feature: word -> count dict per review.
reviews['word_count'] = gl.text_analytics.count_words(reviews['text'])
reviews['word_count']
# 80/20 train/test split with a fixed seed for reproducibility.
train_data, test_data = reviews.random_split(.8, seed=0)
sentiment_model = gl.logistic_classifier.create(train_data,
target='sentiment',
features=['word_count'],
validation_set=test_data)
sentiment_model.evaluate(test_data, metric='roc_curve')
sentiment_model.show(view='Evaluation')
# Qualitative check on one business: sort its reviews by predicted
# positive-sentiment probability and read the extremes.
most_popular_business = 'VVeogjZya58oiTxK7qUjAQ'
most_popular_business_data = test_data[test_data['business_id'] == most_popular_business]
most_popular_business_data
most_popular_business_data['predictions'] = sentiment_model.predict(most_popular_business_data,
output_type = 'probability')
most_popular_business_data = most_popular_business_data.sort('predictions')
print most_popular_business_data['text'][1]
print most_popular_business_data['text'][-2]
# 7. Deployment
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unpack to extract structure
Step2: Votes are still crammed in a dictionary. Let's unpack it.
Step3: Quick data visualization
Step4: 3. Problem formulation
Step5: 4. Feature engineering
Step6: 5. Model/Algorithm selection & training
Step7: 6a. Evaluate the model (Quantitatively)
Step8: 6b. Evaluate the model (Qualitatively)
Step9: Sort the reviews based on the predicted sentiment and explore
Step10: Explore some very bad sentiment reviews
Step11: Explore some very good sentiment reviews
|
10,298
|
<ASSISTANT_TASK:>
Python Code:
# Demo of simmit/smartplus stiffness (L) and compliance (M) tensor builders:
# isotropic, cubic, and transversely isotropic cases, each followed by a
# symmetry check and recovery of the engineering constants. (Python 2 prints.)
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from simmit import smartplus as sim
import os
# --- Isotropic stiffness from (E, nu) ---
E = 70000.0
nu = 0.3
L = sim.L_iso(E,nu,"Enu")
print np.array_str(L, precision=4, suppress_small=True)
# check_symetries reports the detected material symmetry class and its props.
d = sim.check_symetries(L)
print(d['umat_type'])
print(d['props'])
# Recover (E, nu) back from the tensor as a round-trip check.
x = sim.L_iso_props(L)
print(x)
# --- Isotropic compliance; its inverse should match the stiffness class ---
E = 70000.0
nu = 0.3
M = sim.M_iso(E,nu,"Enu")
print np.array_str(M, precision=2)
L_inv = np.linalg.inv(M)
d = sim.check_symetries(L_inv)
print(d['umat_type'])
print(d['props'])
x = sim.M_iso_props(M)
print(x)
# --- Cubic stiffness from (E, nu, G) ---
E = 70000.0
nu = 0.3
G = 23000.0
L = sim.L_cubic(E,nu,G,"EnuG")
print np.array_str(L, precision=2)
d = sim.check_symetries(L)
print(d['umat_type'])
print(d['props'])
x = sim.L_cubic_props(L)
print(x)
# --- Cubic compliance, inverted back to stiffness for the same checks ---
E = 70000.0
nu = 0.3
G = 23000.0
M = sim.M_cubic(E,nu,G,"EnuG")
print np.array_str(M, precision=2)
L = np.linalg.inv(M)
d = sim.check_symetries(L)
print(d['umat_type'])
print(d['props'])
x = sim.L_cubic_props(L)
print(x)
# --- Transversely isotropic stiffness (symmetry axis = 3) ---
EL = 70000.0
ET = 20000.0
nuTL = 0.08
nuTT = 0.3
GLT = 12000.0
axis = 3
L = sim.L_isotrans(EL,ET,nuTL,nuTT,GLT,axis)
print np.array_str(L, precision=2)
d = sim.check_symetries(L)
print(d['umat_type'])
print(d['axis'])
print np.array_str(d['props'], precision=2)
x = sim.L_isotrans_props(L,axis)
print np.array_str(x, precision=2)
# --- Utility vectors (presumably Voigt-notation helpers — verify in docs) ---
v = sim.Ith()
print v
v = sim.Ir2()
print v
v = sim.Ir05()
print v
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: L_iso
Step2: M_iso
Step3: L_cubic
Step4: M_cubic
Step5: L_isotrans
Step6: bp
Step7: Ir2()
Step8: Ir05()
|
10,299
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd

df = pd.DataFrame(data=[[1, 2], [3, 4], [1, 2], [1, 4], [1, 2]],
                  columns=['col1', 'col2'])


def g(df):
    """
    Return the duplicated (col1, col2) rows — every occurrence except the
    last one per group — with an 'index_original' column pointing at the
    index of the *last* occurrence of that group.
    """
    # Point every row at the first occurrence of its (col1, col2) group:
    # idxmax over an all-equal column yields the group's first index.
    df['index_original'] = df.groupby(['col1', 'col2']).col1.transform('idxmax')
    # Walk backwards, redirecting each group's reference to its last occurrence.
    for pos in reversed(range(len(df))):
        first = df.loc[pos, 'index_original']
        if pos <= first:
            continue
        if first == df.loc[first, 'index_original']:
            # First revisit of this group from the back: `pos` is the last
            # occurrence — record it on the group's first row.
            df.loc[first, 'index_original'] = pos
        df.loc[pos, 'index_original'] = df.loc[first, 'index_original']
    # Keep every duplicate except the final occurrence of each group.
    return df[df.duplicated(subset=['col1', 'col2'], keep='last')]


result = g(df.copy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.