| Unnamed: 0 (int64, 0 – 15.9k) | cleaned_code (string, lengths 67 – 124k, nullable) | cleaned_prompt (string, lengths 168 – 30.3k, nullable) |
|---|---|---|
13,900
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from sklearn.preprocessing import Imputer
from sklearn import tree
from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
#train_df = pd.read_csv("titanic.csv")
#test_df = pd.read_csv("titanic_test.csv")
from google.colab import files
import io
uploaded = files.upload()
train_df = pd.read_csv(io.StringIO(uploaded['titanic.csv'].decode('utf-8')))
uploaded = files.upload()
test_df = pd.read_csv(io.StringIO(uploaded['titanic_test.csv'].decode('utf-8')))
train_df.head()
train_df["Sex"] = train_df["Sex"].apply(lambda sex: 0 if sex == 'male' else 1)
y = targets = labels = train_df["Survived"].values
columns = ["Fare", "Pclass", "Sex", "Age", "SibSp"]
features = train_df[list(columns)].values
features
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
X = imp.fit_transform(features)
X
my_tree_one = tree.DecisionTreeClassifier(criterion="entropy", max_depth=3)
my_tree_one = my_tree_one.fit(X, y)
#The feature_importances_ attribute makes it simple to interpret the significance of the predictors you include
print(my_tree_one.feature_importances_)
print(my_tree_one.score(X, y))
with open("titanic.dot", 'w') as f:
    f = tree.export_graphviz(my_tree_one, out_file=f, feature_names=columns)
files.download('titanic.dot')
test_df.head()
test_df["Sex"] = test_df["Sex"].apply(lambda sex: 0 if sex == 'male' else 1)
#features_test = train_df[list(columns)].values
features_test = test_df[list(columns)].values
imp_test = Imputer(missing_values='NaN', strategy='mean', axis=0)
X_test = imp_test.fit_transform(features_test)
X_test
pred = my_tree_one.predict(X_test)
pred
#Print Confusion matrix
pred = my_tree_one.predict(X)
df_confusion = metrics.confusion_matrix(y, pred)
df_confusion
def plot_confusion_matrix(df_confusion, title='Confusion matrix', cmap=plt.cm.gray_r):
    plt.matshow(df_confusion, cmap=cmap) # imshow
    plt.title('Confusion Matrix')
    plt.colorbar()
    plt.ylabel('Actual')
    plt.xlabel('Predicted')
plot_confusion_matrix(df_confusion)
#Setting "max_depth" to 10 and "min_samples_split" to 5 : my_tree_two
max_depth = 10
min_samples_split = 5
my_tree_two = tree.DecisionTreeClassifier(max_depth = 10, min_samples_split = 5, random_state = 1)
my_tree_two = my_tree_two.fit(X, y)
#Print the score of the new decision tree
print(my_tree_two.score(X, y))
pred = my_tree_two.predict(X)
df_confusion = metrics.confusion_matrix(y, pred)
df_confusion
def plot_confusion_matrix(df_confusion, title='Confusion matrix', cmap=plt.cm.gray_r):
    plt.matshow(df_confusion, cmap=cmap) # imshow
    plt.title('Confusion Matrix')
    plt.colorbar()
    plt.ylabel('Actual')
    plt.xlabel('Predicted')
plot_confusion_matrix(df_confusion)
# Add new feature and then train the model
train_df["family_size"] = train_df.SibSp + train_df.Parch + 1
from sklearn.ensemble import RandomForestClassifier
# Building and fitting my_forest
forest = RandomForestClassifier(max_depth = 10, min_samples_split=2, n_estimators = 100, random_state = 1)
my_forest = forest.fit(X, y)
# Print the score of the fitted random forest
print(my_forest.score(X, y))
pred = my_forest.predict(X)
df_confusion = metrics.confusion_matrix(y, pred)
df_confusion
fpr, tpr, _ = metrics.roc_curve(y, pred)
roc_auc = metrics.auc(fpr, tpr)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b',
label='AUC = %0.2f'% roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.2])
plt.ylim([-0.1,1.2])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
# ROC curve on Predicted probabilities
pred_proba = my_forest.predict_proba(X)
fpr, tpr, _ = metrics.roc_curve(y, pred_proba[:,1])
roc_auc = metrics.auc(fpr, tpr)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b',
label='AUC = %0.2f'% roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.2])
plt.ylim([-0.1,1.2])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: visit http
Step2: What happens when we change the tree depth?
Step3: Try it yourself
Step4: Random Forest
|
13,901
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.extend(['../'])
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
%matplotlib inline
import onsager.crystal as crystal
import onsager.OnsagerCalc as onsager
from scipy.constants import physical_constants
kB = physical_constants['Boltzmann constant in eV/K'][0]
betaCo = crystal.Crystal.FCC(1.0, 'Co')
print(betaCo)
betaCo.Wyckoffpos(np.array([0.5,0.,0.]))
betaCoO = betaCo.addbasis(betaCo.Wyckoffpos(np.array([0.5,0.,0.])), ['O'])
print(betaCoO)
Ojumpnetwork = betaCoO.jumpnetwork(1,0.5)
Odiffuser = onsager.Interstitial(betaCoO, 1, betaCoO.sitelist(1), Ojumpnetwork)
Ppara, Pperp, Pshear = -2.70, -4.30, 0.13
reppos = betaCoO.pos2cart(np.zeros(3), (1, Odiffuser.sitelist[0][0]))
perpindex = [n for n in range(3) if np.isclose(reppos[n], 0)][0]
paraindex = [n for n in range(3) if n != perpindex]
shearsign = 1 if reppos[paraindex[0]]*reppos[paraindex[1]] > 0 else -1
Pdipole = np.diag([Pperp if n == perpindex else Ppara for n in range(3)])
Pdipole[paraindex[0], paraindex[1]] = shearsign*Pshear
Pdipole[paraindex[1], paraindex[0]] = shearsign*Pshear
Pdipole
nu0, Emig = 1e13, 0.91
nsites, njumps = len(Odiffuser.sitelist), len(Odiffuser.jumpnetwork)
betaCoOthermodict = {'pre': np.ones(nsites), 'ene': np.zeros(nsites),
'preT': nu0*np.ones(nsites), 'eneT': Emig*np.ones(nsites)}
beta = 1./(kB*300) # 300K
Llamb = Odiffuser.losstensors(betaCoOthermodict['pre'], beta*betaCoOthermodict['ene'],
[Pdipole],
betaCoOthermodict['preT'], beta*betaCoOthermodict['eneT'])
for (lamb, Ltens) in Llamb:
    print(lamb, crystal.FourthRankIsotropic(Ltens))
sh1 = crystal.FourthRankIsotropic(Llamb[0][1])[1]
sh2 = crystal.FourthRankIsotropic(Llamb[1][1])[1]
print(sh2/sh1)
nuIF = 1.
Trange = np.linspace(250,400,151)
shlist = []
for T in Trange:
    beta = 1./(kB*T)
    Llamb = Odiffuser.losstensors(betaCoOthermodict['pre'], beta*betaCoOthermodict['ene'],
                                  [Pdipole],
                                  betaCoOthermodict['preT'], beta*betaCoOthermodict['eneT'])
    f1,L1,f2,L2 = Llamb[0][0], Llamb[0][1], Llamb[1][0], Llamb[1][1]
    sh = crystal.FourthRankIsotropic(L1*nuIF*f1/(nuIF**2+f1**2) +
                                     L2*nuIF*f2/(nuIF**2+f2**2))[1]
    shlist.append(sh*kB*T)
shear = np.array(shlist)
fig, ax1 = plt.subplots()
ax1.plot(Trange, shear/np.max(shear), 'k')
ax1.set_ylabel('loss $Q$ [unitless]', fontsize='x-large')
ax1.set_xlabel('$T$ [K]', fontsize='x-large')
plt.show()
# plt.savefig('FCC-Co-O-loss.pdf', transparent=True, format='pdf')
Trange[np.argmax(shear)]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We need to analyze the geometry of our representative site; we get the position, then find the zero entry in the position vector, and work from there.
Step2: Internal friction resonance. We do loading at a frequency of 1 Hz.
Step3: Temperature where peak maximum is found?
|
13,902
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy.spatial.distance import cdist
def centroids(X_nd, label_n):
    """
    Given X_nd, a 2-dimensional array of n d-dimensional points,
    and n cluster assignments label_n (a 1-d array of n labels,
    ints in range [0, k)), return (c_kd, dist_n) the k centroids c_kd and the
    squared Euclidean distances dist_n from each point to its centroid.
    Intentionally zero out any empty clusters.
    """
    n, d = X_nd.shape
    k = label_n.max() + 1
    c_kd = np.zeros((k, d))
    dist_n = np.zeros(n)
    for i in range(k):
        ilabel_n = label_n == i
        if not ilabel_n.sum():
            continue
        X_id = X_nd[ilabel_n]
        c_kd[i] = X_id.mean(axis=0)
        dist_n[ilabel_n] = cdist(c_kd[i:i+1, :], X_id, 'sqeuclidean').ravel()
    return c_kd, dist_n
def inverse_permutation(p):
    ip = np.empty_like(p)
    ip[p] = np.arange(len(p))
    return ip
def vcentroids(X, label):
    """Vectorized version of centroids."""
    # order points by cluster label
    ix = np.argsort(label)
    label = label[ix]
    Xz = X[ix]
    # compute pos where pos[i]:pos[i+1] is span of cluster i
    d = np.diff(label, prepend=0) # binary mask where labels change
    pos = np.flatnonzero(d) # indices where labels change
    pos = np.repeat(pos, d[pos]) # repeat for 0-length clusters
    pos = np.append(np.insert(pos, 0, 0), len(X))
    # accumulate dimension sums
    Xz = np.concatenate((np.zeros_like(Xz[0:1]), Xz), axis=0)
    Xsums = np.cumsum(Xz, axis=0)
    # reduce by taking differences of accumulations exactly at the
    # endpoints for cluster indices, using pos array
    Xsums = np.diff(Xsums[pos], axis=0)
    counts = np.diff(pos)
    c = Xsums / np.maximum(counts, 1)[:, np.newaxis]
    # re-broadcast centroids for final distance calculation
    repeated_centroids = np.repeat(c, counts, axis=0)
    aligned_centroids = repeated_centroids[inverse_permutation(ix)]
    dist = np.sum((X - aligned_centroids) ** 2, axis=1)
    return c, dist
np.random.seed(1234)
n = 10000
d = 10
k = 10000
x = np.random.randn(n, d)
label = np.random.randint(k, size=n)
c0, dists0 = centroids(x, label)
c1, dists1 = vcentroids(x, label)
np.allclose(c0, c1), np.allclose(dists0, dists1)
%timeit centroids(x, label)
%timeit vcentroids(x, label)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Frequently, we run into situations where we need to deal with arrays of varying sizes in numpy. These result in much slower code that deals with each size individually. Luckily, by extracting commutative and associative operations, we can vectorize even in such scenarios, resulting in significant speed improvements. This is especially pronounced when doing the same thing with deep learning packages like torch.
Step3: We want to do the same thing (compute the mean and the squared distances to it) for each of these mixed-size X_id arrays, but the for i in range(k) loop is difficult to vectorize; a small sketch of the grouping trick follows.
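As a rough, self-contained sketch (not part of the original notebook) of the sort/cumsum/diff idea described here, using toy values chosen only for illustration:
import numpy as np
x = np.array([1., 2., 3., 4., 5.])
label = np.array([0, 1, 0, 1, 1])
order = np.argsort(label)                                          # group equal labels together
pos = np.searchsorted(label[order], np.arange(label.max() + 2))    # segment boundaries after sorting
sums = np.diff(np.concatenate(([0.], np.cumsum(x[order])))[pos])   # per-label sums from one cumulative sum
counts = np.diff(pos)
print(sums / np.maximum(counts, 1))                                # per-label means: [2.0, 3.666...]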
|
13,903
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('../Evaluation')
from eval_proposal import ANETproposal
import matplotlib.pyplot as plt
import numpy as np
import json
%matplotlib inline
def run_evaluation(ground_truth_filename, proposal_filename,
                   max_avg_nr_proposals=100,
                   tiou_thresholds=np.linspace(0.5, 0.95, 10),
                   subset='validation'):
    anet_proposal = ANETproposal(ground_truth_filename, proposal_filename,
                                 tiou_thresholds=tiou_thresholds,
                                 max_avg_nr_proposals=max_avg_nr_proposals,
                                 subset=subset, verbose=True, check_status=True)
    anet_proposal.evaluate()
    recall = anet_proposal.recall
    average_recall = anet_proposal.avg_recall
    average_nr_proposals = anet_proposal.proposals_per_video
    return (average_nr_proposals, average_recall, recall)
def plot_metric(average_nr_proposals, average_recall, recall, tiou_thresholds=np.linspace(0.5, 0.95, 10)):
    fn_size = 14
    plt.figure(num=None, figsize=(6, 5))
    ax = plt.subplot(1,1,1)
    colors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
    area_under_curve = np.zeros_like(tiou_thresholds)
    for i in range(recall.shape[0]):
        area_under_curve[i] = np.trapz(recall[i], average_nr_proposals)
    for idx, tiou in enumerate(tiou_thresholds[::2]):
        ax.plot(average_nr_proposals, recall[2*idx,:], color=colors[idx+1],
                label="tiou=[" + str(tiou) + "], area=" + str(int(area_under_curve[2*idx]*100)/100.),
                linewidth=4, linestyle='--', marker=None)
    # Plots Average Recall vs Average number of proposals.
    ax.plot(average_nr_proposals, average_recall, color=colors[0],
            label="tiou = 0.5:0.05:0.95," + " area=" + str(int(np.trapz(average_recall, average_nr_proposals)*100)/100.),
            linewidth=4, linestyle='-', marker=None)
    handles, labels = ax.get_legend_handles_labels()
    ax.legend([handles[-1]] + handles[:-1], [labels[-1]] + labels[:-1], loc='best')
    plt.ylabel('Average Recall', fontsize=fn_size)
    plt.xlabel('Average Number of Proposals per Video', fontsize=fn_size)
    plt.grid(b=True, which="both")
    plt.ylim([0, 1.0])
    plt.setp(plt.axes().get_xticklabels(), fontsize=fn_size)
    plt.setp(plt.axes().get_yticklabels(), fontsize=fn_size)
    plt.show()
%%time
# seed the random number generator to get consistent results across multiple runs
np.random.seed(42)
with open("../Evaluation/data/activity_net.v1-3.min.json", 'r') as fobj:
    gd_data = json.load(fobj)
subset='validation'
avg_nr_proposals = 100
proposal_data = {'results': {}, 'version': gd_data['version'], 'external_data': {}}
for vid_id, info in gd_data['database'].iteritems():
    if subset != info['subset']:
        continue
    this_vid_proposals = []
    for _ in range(avg_nr_proposals):
        # generate random proposal center, length, and score
        center = info['duration']*np.random.rand(1)[0]
        length = info['duration']*np.random.rand(1)[0]
        proposal = {
            'score': np.random.rand(1)[0],
            'segment': [center - length/2., center + length/2.],
        }
        this_vid_proposals += [proposal]
    proposal_data['results'][vid_id] = this_vid_proposals
with open("../Evaluation/data/uniform_random_proposals.json", 'w') as fobj:
    json.dump(proposal_data, fobj)
%%time
uniform_average_nr_proposals_valid, uniform_average_recall_valid, uniform_recall_valid = run_evaluation(
"../Evaluation/data/activity_net.v1-3.min.json",
"../Evaluation/data/uniform_random_proposals.json",
max_avg_nr_proposals=100,
tiou_thresholds=np.linspace(0.5, 0.95, 10),
subset='validation')
plot_metric(uniform_average_nr_proposals_valid, uniform_average_recall_valid, uniform_recall_valid)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Helper functions to evaluate a proposal submission file and plot the metric results
Step2: Generate uniform random proposal for the validation subset
Step3: Evaluate the uniform random proposals and plot the metric results
|
13,904
|
<ASSISTANT_TASK:>
Python Code:
def name_of_function(arg1,arg2):
    '''
    This is where the function's Document String (doc-string) goes
    '''
    # Do stuff here
    #return desired result
def say_hello():
    print 'hello'
say_hello()
def greeting(name):
    print 'Hello %s' %name
greeting('Jose')
def add_num(num1,num2):
    return num1+num2
add_num(4,5)
# Can also save as variable due to return
result = add_num(4,5)
print result
print add_num('one','two')
def is_prime(num):
    '''
    Naive method of checking for primes.
    '''
    for n in range(2,num):
        if num % n == 0:
            print 'not prime'
            break
    else: # If never mod zero, then prime
        print 'prime'
is_prime(16)
import math
def is_prime(num):
    '''
    Better method of checking for primes.
    '''
    if num % 2 == 0 and num > 2:
        return False
    for i in range(3, int(math.sqrt(num)) + 1, 2):
        if num % i == 0:
            return False
    return True
is_prime(14)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We begin with def, then a space, followed by the name of the function. Try to keep names relevant; for example, len() is a good name for a length() function. Also be careful with names: you wouldn't want to give a function the same name as a built-in function in Python (such as len). A short illustration of why follows.
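A tiny illustration (not from the original lesson) of why shadowing a built-in name is a bad idea; the assignment below is a purely hypothetical mistake:
len = 10                  # hypothetical mistake: the built-in len() is now hidden in this session
# len('hello')            # would now raise TypeError: 'int' object is not callable
del len                   # removing the shadowing name restores the built-in
print(len('hello'))       # 5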
Step2: Call the function
Step3: Example 2
Step4: Using return
Step5: What happens if we input two strings?
Step6: Note that because we don't declare variable types in Python, this function could be used to add numbers or sequences together! We'll later learn about adding checks to make sure a user passes the correct arguments to a function.
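A small sketch (not part of the original lesson) of what such a check might look like; add_num_checked is a hypothetical name introduced only for this example:
def add_num_checked(num1, num2):
    # only accept plain numbers, otherwise fail loudly instead of silently concatenating strings
    if not isinstance(num1, (int, float)) or not isinstance(num2, (int, float)):
        raise TypeError('add_num_checked expects two numbers')
    return num1 + num2

print(add_num_checked(4, 5))        # 9
# add_num_checked('one', 'two')     # would raise TypeError instead of returning 'onetwo'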
Step7: Note how we break out of the loop after the print statement! We can improve this by only checking up to the square root of the target number, and by disregarding all even numbers after checking for 2. We'll also switch to returning a boolean value to get an example of using return statements.
|
13,905
|
<ASSISTANT_TASK:>
Scala Code:
%libraryDependencies += "org.viz.lightning" %% "lightning-scala" % "0.1.6"
%update
import org.viz.lightning._
import org.apache.spark.graphx._
val lgn = Lightning(host="https://lightning-spark-summit.herokuapp.com" )
lgn.enableNotebook()
val source = "/Users/mathisonian/projects/spark-summit/notebooks/data/allen-connectivity.txt"
val g = GraphLoader.edgeListFile(sc, source)
val links = g.edges.collect().map(e => Array(e.srcId.toInt, e.dstId.toInt))
lgn.force(links)
val links = g.edges.collect().map(e => Array(e.srcId.toInt, e.dstId.toInt))
val degrees = g.degrees.sortBy(_._1).collect().map(x => Math.log(x._2))
lgn.force(links, value=degrees, colormap="Lightning")
val links = g.edges.collect().map(e => Array(e.srcId.toInt, e.dstId.toInt))
val connectedComponents = g.connectedComponents().vertices.sortBy(_._1).map(_._2.toInt).collect()
lgn.force(links, label=connectedComponents)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load structure similarity data
Step2: Show the network (unlabeled)
Step3: Show the network colored by degree
Step4: Show the network colored by connected components
|
13,906
|
<ASSISTANT_TASK:>
Python Code:
from urllib.request import urlretrieve
from os.path import isfile
from tqdm import tqdm
class DLProgress(tqdm):
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        self.total = total_size
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num
if not isfile('train.p'):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Train Dataset') as pbar:
        urlretrieve(
            'https://s3.amazonaws.com/udacity-sdc/datasets/german_traffic_sign_benchmark/train.p',
            'train.p',
            pbar.hook)
if not isfile('test.p'):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Test Dataset') as pbar:
        urlretrieve(
            'https://s3.amazonaws.com/udacity-sdc/datasets/german_traffic_sign_benchmark/test.p',
            'test.p',
            pbar.hook)
print('Training and Test data downloaded.')
import pickle
import numpy as np
import math
# Fix error with TF and Keras
import tensorflow as tf
tf.python.control_flow_ops = tf
print('Modules loaded.')
with open('train.p', 'rb') as f:
    data = pickle.load(f)
# TODO: Load the feature data to the variable X_train
X_train = data['features']
# TODO: Load the label data to the variable y_train
y_train = data['labels']
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert np.array_equal(X_train, data['features']), 'X_train not set to data[\'features\'].'
assert np.array_equal(y_train, data['labels']), 'y_train not set to data[\'labels\'].'
print('Tests passed.')
# TODO: Shuffle the data
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train, random_state=0)
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert X_train.shape == data['features'].shape, 'X_train has changed shape. The shape shouldn\'t change when shuffling.'
assert y_train.shape == data['labels'].shape, 'y_train has changed shape. The shape shouldn\'t change when shuffling.'
assert not np.array_equal(X_train, data['features']), 'X_train not shuffled.'
assert not np.array_equal(y_train, data['labels']), 'y_train not shuffled.'
print('Tests passed.')
# TODO: Normalize the data features to the variable X_normalized
def normalize(image_data):
    a = -0.5
    b = 0.5
    x_min = 0
    x_max = 255
    return a + ((image_data - x_min) * (b - a)) / (x_max - x_min)
X_normalized = normalize(X_train)
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert math.isclose(np.min(X_normalized), -0.5, abs_tol=1e-5) and math.isclose(np.max(X_normalized), 0.5, abs_tol=1e-5), 'The range of the training data is: {} to {}. It must be -0.5 to 0.5'.format(np.min(X_normalized), np.max(X_normalized))
print('Tests passed.')
# TODO: One Hot encode the labels to the variable y_one_hot
from sklearn.preprocessing import LabelBinarizer
label_binarizer = LabelBinarizer()
y_one_hot = label_binarizer.fit_transform(y_train)
# STOP: Do not change the tests below. Your implementation should pass these tests.
import collections
assert y_one_hot.shape == (39209, 43), 'y_one_hot is not the correct shape. It\'s {}, it should be (39209, 43)'.format(y_one_hot.shape)
assert next((False for y in y_one_hot if collections.Counter(y) != {0: 42, 1: 1}), True), 'y_one_hot not one-hot encoded.'
print('Tests passed.')
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
model = Sequential()
# TODO: Build a Multi-layer feedforward neural network with Keras here.
# 1st Layer - Add a flatten layer
model.add(Flatten(input_shape=(32, 32, 3)))
# 2nd Layer - Add a fully connected layer
model.add(Dense(128))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 4th Layer - Add a fully connected layer
model.add(Dense(43))
# 5th Layer - Add a ReLU activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten
from keras.activations import relu, softmax
def check_layers(layers, true_layers):
    assert len(true_layers) != 0, 'No layers found'
    for layer_i in range(len(layers)):
        assert isinstance(true_layers[layer_i], layers[layer_i]), 'Layer {} is not a {} layer'.format(layer_i+1, layers[layer_i].__name__)
    assert len(true_layers) == len(layers), '{} layers found, should be {} layers'.format(len(true_layers), len(layers))
check_layers([Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[0].input_shape == (None, 32, 32, 3), 'First layer input shape is wrong, it should be (32, 32, 3)'
assert model.layers[1].output_shape == (None, 128), 'Second layer output is wrong, it should be (128)'
assert model.layers[2].activation == relu, 'Third layer not a relu activation layer'
assert model.layers[3].output_shape == (None, 43), 'Fourth layer output is wrong, it should be (43)'
assert model.layers[4].activation == softmax, 'Fifth layer not a softmax activation layer'
print('Tests passed.')
# TODO: Compile and train the model here.
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
# History is a record of training loss and metrics
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=10, validation_split=0.2)
# Calculate test score
test_score = model.evaluate(X_normalized, y_one_hot)
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.optimizers import Adam
assert model.loss == 'categorical_crossentropy', 'Not using categorical_crossentropy loss function'
assert isinstance(model.optimizer, Adam), 'Not using adam optimizer'
assert len(history.history['acc']) == 10, 'You\'re using {} epochs when you need to use 10 epochs.'.format(len(history.history['acc']))
assert history.history['acc'][-1] > 0.92, 'The training accuracy was: %.3f. It shoud be greater than 0.92' % history.history['acc'][-1]
assert history.history['val_acc'][-1] > 0.85, 'The validation accuracy is: %.3f. It shoud be greater than 0.85' % history.history['val_acc'][-1]
print('Tests passed.')
# TODO: Re-construct the network and add a convolutional layer before the flatten layer.
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D
# number of convolutional filters to use
nb_filters = 32
# convolution kernel size
kernel_size = (3, 3)
# input shape
input_shape = (32, 32, 3)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 1st Layer - Add a flatten layer
model.add(Flatten(input_shape=(32, 32, 3)))
# 2nd Layer - Add a fully connected layer
model.add(Dense(128))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 4th Layer - Add a fully connected layer
model.add(Dense(43))
# 5th Layer - Add a ReLU activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D
check_layers([Convolution2D, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[0].input_shape == (None, 32, 32, 3), 'First layer input shape is wrong, it should be (32, 32, 3)'
assert model.layers[0].nb_filter == 32, 'Wrong number of filters, it should be 32'
assert model.layers[0].nb_col == model.layers[0].nb_row == 3, 'Kernel size is wrong, it should be a 3x3'
assert model.layers[0].border_mode == 'valid', 'Wrong padding, it should be valid'
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2)
assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1]
print('Tests passed.')
# TODO: Re-construct the network and add a pooling layer after the convolutional layer.
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
# number of convolutional filters to use
nb_filters = 32
# convolution kernel size
kernel_size = (3, 3)
# input shape
input_shape = (32, 32, 3)
# size of pooling area for max pooling
pool_size = (2, 2)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=pool_size))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 1st Layer - Add a flatten layer
model.add(Flatten(input_shape=(32, 32, 3)))
# 2nd Layer - Add a fully connected layer
model.add(Dense(128))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 4th Layer - Add a fully connected layer
model.add(Dense(43))
# 5th Layer - Add a ReLU activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
check_layers([Convolution2D, MaxPooling2D, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[1].pool_size == (2, 2), 'Second layer must be a max pool layer with pool size of 2x2'
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2)
assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1]
print('Tests passed.')
# TODO: Re-construct the network and add dropout after the pooling layer.
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D
# number of convolutional filters to use
nb_filters = 32
# convolution kernel size
kernel_size = (3, 3)
# input shape
input_shape = (32, 32, 3)
# size of pooling area for max pooling
pool_size = (2, 2)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.5))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 1st Layer - Add a flatten layer
model.add(Flatten(input_shape=(32, 32, 3)))
# 2nd Layer - Add a fully connected layer
model.add(Dense(128))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 4th Layer - Add a fully connected layer
model.add(Dense(43))
# 5th Layer - Add a ReLU activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
check_layers([Convolution2D, MaxPooling2D, Dropout, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[2].p == 0.5, 'Third layer should be a Dropout of 50%'
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2)
assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1]
print('Tests passed.')
# TODO: Build a model
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D
# number of convolutional filters to use
nb_filters = 32
# convolution kernel size
kernel_size = (3, 3)
# input shape
input_shape = (32, 32, 3)
# size of pooling area for max pooling
pool_size = (2, 2)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(43))
model.add(Activation('softmax'))
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=10, validation_split=0.2)
# TODO: Load test data
with open('test.p', 'rb') as f:
    data_test = pickle.load(f)
X_test = data_test['features']
y_test = data_test['labels']
# TODO: Preprocess data & one-hot encode the labels
X_test_normalized = normalize(X_test)
from sklearn.preprocessing import LabelBinarizer
label_binarizer = LabelBinarizer()
y_test_one_hot = label_binarizer.fit_transform(y_test)
# TODO: Evaluate model on test data
score = model.evaluate(X_test_normalized, y_test_one_hot, verbose=0)
for i in range(len(model.metrics_names)):
    print("{0} = {1:.3f}".format(model.metrics_names[i], score[i]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Overview
Step2: Load the Data
Step3: Preprocess the Data
Step4: Normalize the features
Step5: One-Hot Encode the labels
Step6: Keras Sequential Model
Step7: Training a Sequential Model
Step8: Convolutions
Step9: Pooling
Step10: Dropout
Step11: Optimization
Step12: Best Validation Accuracy
|
13,907
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
a = np.arange(1, 16).reshape(3, 5).T
a
np.arange(1, 6)[:, np.newaxis] + np.arange(0, 11, 5)
a = np.arange(25).reshape((5,5))
a
b = np.array([1., 5, 10, 15, 20])
b
a/b[:, np.newaxis]
x = np.random.rand(10,3)
a= np.abs(x - 0.5)
b = a.argsort()
b
e = b[:, 0]
f = np.tile(e[:, np.newaxis], 3)
f
x[np.arange(10), e]
g = np.tile(np.arange(3), (10,1))
g
h = g == f
h
x[h]
x
from scipy import misc
import matplotlib.pylab as plt
%pylab inline
lena = misc.lena()
lena
plt.imshow(lena)
plt.imshow(lena, cmap='gray')
crop_lena = lena[100:-100, 100:-100]
plt.imshow(crop_lena, cmap=plt.cm.gray)
lena.shape
x = np.arange(512)
y = np.arange(512)[:, np.newaxis]
mask = (x - 256)**2 + (y - 256)** 2 > 230**2
mask[230, 430]
lena[mask]=0
plt.imshow(lena, cmap='gray')
data = np.loadtxt('data/populations.txt')
data
year, hares, lynxes, carrots = data.T
plt.axes([0.2, 0.1, 0.5, 0.8])
plt.plot(year, hares, year, lynxes, year, carrots)
plt.legend(('Hare', 'Lynx', 'Carrot'), loc=(1.05, 0.5))
hare_grad = np.gradient(hares)
hare_grad
plt.plot(year, hare_grad, year, -lynxes)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 1
Step2: Problem 2
Step3: Problem 3
Step4: Problem 4
Step5: The image can be displayed using the plt.imshow function.
Step6: The picture above is rendered directly from the 2-D array values, so it is not accurate.
Step7: Let's zoom in on a specific region using cropping.
Step8: Let's enclose Lena's face region with a circle.
Step9: Problem 5
Step10: data is a 2-D array with shape (21, 4).
Step11: Plot the data to see how the populations change from year to year.
Step12: Compute the mean and standard deviation for each species.
|
13,908
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
import blosc
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook as tqn
from collections import OrderedDict
%matplotlib inline
sys.path.append('../../..')
from batch import ResBatch, ax_draw
from batchflow import Dataset, DatasetIndex
sys.path.append('../../utils')
import utils
optimal_params = {
'iteration': [300] * 4,
'learning_rate': [0.04, 0.06, 10, 14],
'degree': [3, 1, 3, 1],
'scaled': [False] * 2 + [True] * 2
}
optimal_params = OrderedDict(sorted(optimal_params.items(), key=lambda x: x[0]))
plt.style.use('seaborn-poster')
plt.style.use('ggplot')
iteration = 300
_, axarr = plt.subplots(2, 2)
axarr=axarr.reshape(-1)
for params in range(4):
    graph = []
    for i in range(1, 6):
        default_learning = optimal_params['learning_rate'][params]
        last = int(iteration*(i/10 + 0.5) ** optimal_params['degree'][params])
        if optimal_params['scaled'][params] == True:
            graph.append([0.5 * default_learning/last * (1 + np.cos(np.pi * i / last)) for i in range(2, last+1)])
        else:
            graph.append([0.5 * default_learning * (1 + np.cos(np.pi * i / last)) for i in range(2, last+1)])
    for i in range(len(graph)):
        axarr[params].set_title('Changing the value of learning rate with params: \n \
lr={} degree={} it={} scaled={}'.format(default_learning, optimal_params['degree'][params],
                                        300, optimal_params['scaled'][params]))
        axarr[params].plot(graph[i], label='{} layer'.format(i))
        axarr[params].set_xlabel('Iteration', fontsize=15)
        axarr[params].set_ylabel('Learning rate', fontsize=15)
        axarr[params].legend(fontsize=12)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
src = './../MNIST_data'
with open(os.path.join(src, 'mnist_pics.blk'), 'rb') as file:
    images = blosc.unpack_array(file.read()).reshape(-1, 28, 28)
with open(os.path.join(src, 'mnist_labels.blk'), 'rb') as file:
    labels = blosc.unpack_array(file.read())
global_freeze_loss = []
pipelines = []
res_loss=[]
ix = DatasetIndex(range(50000))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
test_dset = Dataset(ix, ResBatch)
test_pipeline = (test_dset.p
.train_res(res_loss, images[:50000], labels[:50000]))
for i in tqn(range(500)):
    test_pipeline.next_batch(300, n_epochs=None, shuffle=2)
params_list = pd.DataFrame(optimal_params).values
for params in tqn(params_list):
    freeze_loss = []
    config = {
        'freeznet': {'iteration': params[1],
                     'degree': params[0],
                     'learning_rate': params[2],
                     'scaled': params[3]}
    }
    dataset = Dataset(ix, batch_class=ResBatch)
    train_pipeline = (dataset
                      .pipeline(config=config)
                      .train_freez(freeze_loss, images[:50000], labels[:50000]))
    for i in tqn(range(1, 501)):
        train_pipeline.next_batch(300, n_epochs=None, shuffle=2)
    global_freeze_loss.append(freeze_loss)
_, ax = plt.subplots(2,2)
ax = ax.reshape(-1)
for i in range(4):
    utils.ax_draw(global_freeze_loss[i][:300], res_loss[:300], params_list[i], ax[i])
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
_, ax = plt.subplots(2,2)
ax = ax.reshape(-1)
for i in range(4):
    utils.ax_draw(global_freeze_loss[i], res_loss, params_list[i], ax[i])
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will train the model with the following parameters
Step2: About parameters
Step3: We'll compare ResNet model with FreezeOut vs classic ResNet model.
Step4: Then create dataset and pipeline
Step5: The config allows us to easily change the configuration of the model
Step6: Plots below show the losses of the models with different learning rate parameters
Step7: You can see that models using the scaled method are more unstable.
|
13,909
|
<ASSISTANT_TASK:>
Python Code:
AMOUNT_VETS = 1000
AMOUNT_SPECIALTIES = 2 * AMOUNT_VETS
AMOUNT_OWNERS = 10 * AMOUNT_VETS
AMOUNT_PETS = 2 * AMOUNT_OWNERS
AMOUNT_PET_TYPES = int(AMOUNT_PETS / 10)
AMOUNT_VISITS = 2 * AMOUNT_PETS
print("""
Generating fake data for
- %d vets,
- each having ~%d specialties,
- each for serving ~%d owners,
- each caring for ~%d pets,
- of max. ~%d types/races and
- each taking them to ~%d visits.
""" % (AMOUNT_VETS, AMOUNT_SPECIALTIES, AMOUNT_OWNERS, AMOUNT_PETS, AMOUNT_PET_TYPES, AMOUNT_VISITS))
from sqlalchemy import create_engine
engine = create_engine('mysql+mysqlconnector://root:admin@localhost:3306/petclinic', echo=False)
engine.driver
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(engine)
insp.default_schema_name
relevant_methods = [x for x in dir(insp) if x.startswith("get")]
relevant_methods
insp.get_table_names()
import pandas as pd
pd.DataFrame(insp.get_columns('owners'))
from faker import Factory
fake = Factory.create()
fake.name()
fake.street_address()
fake.phone_number()
%%time
[fake.phone_number() for _ in range (1,100000)]
_
# just some unreadable code to make a point
pd.DataFrame(columns=pd.DataFrame(insp.get_columns('owners'))[['name']].T.reset_index().iloc[0][1::]).set_index('id')
owners = pd.DataFrame(index=range(1,AMOUNT_OWNERS+1))
owners.head()
owners.index.name='id'
owners.head()
owners['first_name'] = owners.index.map(lambda x : fake.first_name())
owners.head()
owners['last_name'] = owners.index.map(lambda x : fake.last_name())
owners['address'] = owners.index.map(lambda x : fake.street_address())
owners['city'] = owners.index.map(lambda x : fake.city())
owners['telephone'] = owners.index.map(lambda x : fake.phone_number())
owners.head()
pd.DataFrame(insp.get_columns('types'))
# just some unreadable code to make a point
pd.DataFrame(columns=pd.DataFrame(insp.get_columns('types'))[['name']].T.reset_index().iloc[0][1::]).set_index('id')
# loads all HTML tables from the site, but take only the first found and the second column
animal_names = pd.read_html("https://github.com/hzlzh/Domain-Name-List/blob/master/Animal-words.txt")[0][[1]]
# set the ony column as index
animal_names = animal_names.set_index(1)
# remove the index name
animal_names.index.name = None
animal_names.head()
animal_names.sample(3)
types = pd.DataFrame(index=range(1, min(AMOUNT_PET_TYPES, len(animal_names))+1))
types.index.name='id'
types.head()
types['name'] = animal_names.sample(len(types)).index
types.head()
pd.DataFrame(insp.get_columns('pets'))
# just some unreadable code to make a point
pd.DataFrame(columns=pd.DataFrame(insp.get_columns('pets'))[['name']].T.reset_index().iloc[0][1::]).set_index('id')
pets = pd.DataFrame(index=range(1,AMOUNT_PETS+1))
pets.index.name='id'
pets['name'] = pets.index.map(lambda x : fake.first_name())
pets['birth_date'] = pets.index.map(lambda x : fake.date())
pets.head()
pets['type_id'] = types.sample(len(pets), replace=True).index
pets['owner_id'] = owners.sample(len(pets), replace=True).index
pets.head()
pd.DataFrame(insp.get_columns('visits'))
visits = pd.DataFrame(index=range(1,AMOUNT_VISITS+1))
visits.index.name='id'
visits['pet_id'] = pets.sample(len(visits), replace=True).index
visits['visit_date'] = visits.index.map(lambda x : fake.date())
# just add some random texts
visits['description'] = visits.index.map(lambda x : fake.text())
visits.head()
pd.DataFrame(insp.get_columns('vets'))
vets = pd.DataFrame(index=range(1,AMOUNT_VETS+1))
vets.index.name='id'
vets['first_name'] = vets.index.map(lambda x : fake.first_name())
vets['last_name'] = vets.index.map(lambda x : fake.last_name())
vets.head()
pd.DataFrame(insp.get_columns('specialties'))
specialties = pd.DataFrame(index=range(1,AMOUNT_SPECIALTIES+1))
specialties.index.name='id'
specialties['name'] = specialties.index.map(lambda x : fake.word().title())
specialties.head()
pd.DataFrame(insp.get_columns('vet_specialties'))
vet_specialties_tmp = pd.DataFrame(
index=specialties.sample(
len(vets)*len(specialties),
replace=True).index)
vet_specialties_tmp.index.name = "specialty_id"
vet_specialties_tmp.head()
vet_specialties_tmp['vet_id'] = vets.sample(len(vet_specialties_tmp), replace=True).index
vet_specialties_tmp.head()
vet_specialties_tmp = vet_specialties_tmp.set_index([vet_specialties_tmp.index, 'vet_id'])
vet_specialties_tmp.head()
vet_specialties = pd.DataFrame(index=pd.MultiIndex.from_tuples(vet_specialties_tmp.index.unique()))
vet_specialties.index.names =["specialty_id" , "vet_id"]
vet_specialties.head()
drop_order = [
"vet_specialties",
"specialties",
"vets",
"visits",
"pets",
"owners",
"types"
]
with engine.connect() as con:
    for table in drop_order:
        con.execute("DROP TABLE IF EXISTS " + table + ";")
init_db = pd.read_csv("data/spring-petclinic/initDB.sql", lineterminator=";", sep="\u0012", header=None, names=['sql'])
init_db['sql'] = init_db['sql'].apply(lambda x : x.replace("\r", "").replace("\n", ""))
init_db.head()
with engine.connect() as con:
    init_db['sql'].apply(lambda statement : con.execute(statement))
def store(dataframe, table_name):
    dataframe.to_sql(table_name, con=engine, if_exists="append", chunksize=100)
store(owners,'owners')
store(types, 'types')
store(pets, 'pets')
store(visits, 'visits')
store(vets, 'vets')
store(specialties, 'specialties')
store(vet_specialties, 'vet_specialties')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TL;DR I generate a big amount of fake data for Spring PetClinic with Faker that I store directly in a MySQL database via Pandas / SQLAlchemy.
Step2: Examine the database schema
Step3: Inspect the schema
Step4: The Inspector allows us to iterate over various parts of the schema
Step5: So, for example, you can easily list all tables
Step6: With the Inspector from SQLAlchemy, we can easily list the needed data types for the table
Step7: Data generation
Step8: But there is one drawback
Step9: While this is no problem for our little scenario, there could be room for performance improvement (and I've already programmed a prototype, stay tuned!).
Step10: In other words
Step11: Next, we set the name of the index column to <tt>id</tt>. This is just a minor correction to store the data more easily in the database later on.
Step12: Alright, let's generate some first names with Faker. We sample via the <tt>map</tt> function of the index (which is not very performant, but will do for now).
Step13: We repeat that for all the other columns with the appropriate data.
Step14: The generation of this table was very easy. Let's see what's next!
Step15: So we need a <tt>DataFrame</tt> like this
Step16: We need some animal names for generating the pet's type table. Unfortunately, Faker doesn't provide such data. Luckily, after one Google search, someone placed a list of animals on the World Wide Web. We just read that data with Pandas as an index.
Step17: Now, we are getting to a key trick in generating data very efficiently
Step18: OK, let's get back to the <tt>types</tt> table. We generate the index first. Here we have to be careful
Step19: Now we draw the animals from <tt>animal_names</tt>. We sample the number of requested pet types at once from the <tt>animal_names</tt>' <tt>index</tt>.
Step20: And that's all fake data for the pet types.
Step21: We need some fake data and some ids to already existing entries from the two tables <tt>owners</tt> and <tt>types</tt>.
Step22: For the ids to the <tt>owners</tt> and <tt>types</tt> tables, we use the sampling function that I've introduced above to draw some ids. The important difference is that we set an additional argument <tt>replace=True</tt>, which is necessary when more samples should be drawn than data entries are available in the dataset. Or in plain English
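A minimal illustration (not taken from the original post) of what replace=True changes, using a throwaway three-row DataFrame:
import pandas as pd
df = pd.DataFrame({'name': ['a', 'b', 'c']}, index=[1, 2, 3])
print(df.sample(5, replace=True).index.tolist())   # e.g. [2, 2, 3, 1, 3] -- ids may repeat
# df.sample(5) without replace=True raises a ValueError, because 5 > len(df)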
Step23: Fake "Visits"
Step24: Fake "Vets"
Step25: Fake "Specialties"
Step26: Fake "Vet_Specialties"
Step27: It's a many-to-many join table between the <tt>vets</tt> table and the <tt>specialties</tt> table. So we need a table that has the connections to the ids of both tables with the appropriate length "n x m". But there is a catch that we have to address later, which is why I use a temporary ("tmp") <tt>DataFrame</tt>
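A tiny sketch (not from the original post) of the de-duplication catch hinted at here, with made-up (specialty_id, vet_id) pairs:
import pandas as pd
pairs = pd.DataFrame({'specialty_id': [1, 1, 2], 'vet_id': [1, 1, 3]})
unique_pairs = pairs.drop_duplicates().set_index(['specialty_id', 'vet_id'])
print(unique_pairs.index.tolist())   # [(1, 1), (2, 3)] -- the duplicate pair is gone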
Step28: For all specialties, we assign vets.
Step29: We set the ids of the vets as the index, too.
Step30: Now we have to make sure, that we don't have duplicates in the dataset. We take only the unique index entries and create the actual <tt>vet_specialties</tt> <tt>DataFrame</tt> with the right index names.
Step31: And we're almost done! So far it seems like a brainless activity in most cases...maybe we can automate that in the future ;-)
Step32: Prepare the database schema
Step33: Then we execute all statements line by line via SQLAlchemy.
Step34: Store the new data
|
13,910
|
<ASSISTANT_TASK:>
Python Code:
import warnings
warnings.filterwarnings("ignore")
from astropy.io import ascii
import pandas as pd
names = ["BKLT","Other ID","RA_1950","DEC_1950","SpT_prev","SpT_IR","SpT_adopted",
"Teff","AJ","Lbol","J-H","H-K","K","rK","BrGamma"]
tbl1 = pd.read_csv("http://iopscience.iop.org/0004-637X/525/1/440/fulltext/40180.tb1.txt",
sep="\t", na_values="\ldots", skiprows=1, names=names)
tbl1.RA_1950 = "16 "+tbl1.RA_1950
tbl1.DEC_1950 = "-24 "+tbl1.DEC_1950
tbl1.head()
len(tbl1)
! mkdir ../data/Luhman1999
tbl1.to_csv("../data/Luhman1999/tbl1.csv", index=False, sep='\t')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Table 1 - Data for Spectroscopic Sample in ρ Ophiuchi
Step2: Save data
|
13,911
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
import cmath
def loop_DFT(x):
    """
    Implementing the DFT in a double loop
    Input: x = the vector we want to find the DFT of
    """
    #Get the length of the vector (will only work for 1D arrays)
    N = x.size
    #Create vector to store result in
    X = np.zeros(N, dtype=complex)
    for k in range(N):
        for n in range(N):
            X[k] += np.exp(-1j * 2.0* np.pi* k * n / N) * x[n]
    return X
def matrix_DFT(x):
    """
    Implementing the DFT in vectorised form
    Input: x = the vector we want to find the DFT of
    """
    #Get the length of the vector (will only work for 1D arrays)
    N = x.size
    #Create vector to store result in
    n = np.arange(N)
    k = n.reshape((N,1))
    K = np.exp(-1j * 2.0 * np.pi * k * n / N)
    return K.dot(x)
def one_layer_FFT(x):
    """An implementation of the 1D Cooley-Tukey FFT using one layer"""
    N = x.size
    if N%2 > 0:
        print "Warning: length of x is not a power of two, returning DFT"
        return matrix_DFT(x)
    else:
        X_even = matrix_DFT(x[::2])
        X_odd = matrix_DFT(x[1::2])
        factor = np.exp(-2j * np.pi * np.arange(N) / N)
        return np.concatenate([X_even + factor[:N / 2] * X_odd, X_even + factor[N / 2:] * X_odd])
xTest = np.random.random(256) # create random vector to take the DFT of
print np.allclose(loop_DFT(xTest), matrix_DFT(xTest)) # returns True if all values are equal (within numerical error)
print np.allclose(matrix_DFT(xTest), one_layer_FFT(xTest)) # returns True if all values are equal (within numerical error)
print 'Double Loop DFT:'
%timeit loop_DFT(xTest)
print '\nMatrix DFT:'
%timeit matrix_DFT(xTest)
print '\nOne Layer FFT + Matrix DFT:'
%timeit one_layer_FFT(xTest)
print np.allclose(one_layer_FFT(xTest), np.fft.fft(xTest))
print 'numpy FFT:'
%timeit np.fft.fft(xTest)
def ditrad2(x):
    """
    radix-2 DIT FFT
    x: list or array of N values to perform FFT on, can be real or imaginary, x must be of size 2^n
    """
    ox = np.asarray(x, dtype='complex') # assure the input is an array of complex values
    # INSERT: assign a value to N, the size of the FFT
    N = #??? 1 point
    if N==1: return ox # base case
    # INSERT: compute the 'even' and 'odd' components of the FFT,
    # you will recursively call ditrad() here on a subset of the input values
    # Hint: a binary tree design splits the input in half
    even = #??? 2 points
    odd = #??? 2 points
    twiddles = np.exp(-2.j * cmath.pi * np.arange(N) / N) # compute the twiddle factors
    # INSERT: apply the twiddle factors and return the FFT by combining the even and odd values
    # Hint: twiddle factors are only applied to the odd values
    # Hint: combining even and odd is different from the way the inputs were split apart above.
    return #??? 3 points
print 'The output of ditrad2() is correct?', np.allclose(np.fft.fft(xTest), ditrad2(xTest)) # 2 points if true
print 'your FFT:'
%timeit ditrad2(xTest) # 2 point if your time < One Layer FFT + Matrix DFT
def generalFFT(x):
    """
    radix-2 DIT FFT
    x: list or array of N values to perform FFT on, can be real or imaginary
    """
    ox = np.asarray(x, dtype='complex') # assure the input is an array of complex values
    # INSERT: assign a value to N, the size of the FFT
    N = #??? 1 point
    if N==1: return ox # base case
    elif # INSERT: check if the length is divisible by 2, 1 point
    elif N % 2 ==0: # the length of the input vector is divisible by 2
        # INSERT: do a FFT, use your ditrad2() code here, 3 points
        # Hint: your ditrad2() code can be copied here, and will work with only a minor modification
    else: # INSERT: if not divisible by 2, do a slow Fourier Transform
        return # ??? 1 point
xTest2 = np.random.random(251) # create random vector to take the DFT of, not, this is not of length 2^n
xTest3 = np.random.random(12*32) # create random vector to take the DFT of, not, this is not of length 2^n
print 'The output of generalFFT() is correct?', np.allclose(np.fft.fft(xTest2), generalFFT(xTest2)) # 1 point
print 'Your generic FFT:'
%timeit generalFFT(xTest2) # 1 point if it runs in approximately the same time as matrix_DFT
%timeit generalFFT(xTest3) # 2 point if it runs faster than the xTest2 vector
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: This assignment is to implement a Python-based Fast Fourier Transform (FFT). Building on $\S$ 2.8 ➞ we will implement a 1-D radix-2 Cooley-Tukey-based FFT using decimation in time (DIT) for an $N = 2^n$ input function, and then generalize the function to take any input.
Step5: In $\S$ 2.8.6 ➞ the fast Fourier transform was introduced as using recursion to implement a Fourier transform in $\mathcal{O}(N\log_2N)$ computations, significantly reducing the computational cost of computing the Fourier transform, especially for large $N$. A 'one layer' fast Fourier transform was presented which split the input function into two, and applied the twiddle factor to all values in the layer before calling the matrix-based DFT. This code is replicated below.
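For reference (added here, not part of the original notebook), the radix-2 decimation-in-time split that the recursion exploits, with $E[k]$ and $O[k]$ the $N/2$-point DFTs of the even- and odd-indexed samples:
$$X[k] = E[k] + e^{-2\pi i k / N}\, O[k], \qquad X[k + N/2] = E[k] - e^{-2\pi i k / N}\, O[k], \qquad k = 0,\dots,N/2-1.$$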
Step6: We can easily show that each of these functions produces the same results by introducing a discrete test function $x$ and showing that the same results are reported by each function call
Step7: We can also time each function to report the amount of time it takes to return a finished spectrum.
Step8: As we can see, the matrix DFT is significantly faster than the double loop DFT; this is because of the fast vectorization functions in numpy. And the 'one-layer' FFT is about twice as fast as the matrix DFT because of the FFT architecture. We can go one step further and use the built-in numpy FFT
Step10: The numpy FFT is very fast, in part because of the low-level programming implementation, but fundamentally because it uses an FFT architecture. Our goal for this assignment is to implement such an architecture.
Step11: Once ditrad2() is properly implemented, the results of calling the function should be equivalent to the output of the numpy FFT, and it should run faster than the DFT and one-layer FFT.
Step13: A non-$2^n$ FFT (10 points)
Step14: Running this algorithm on inputs of different lengths should produce different run times. For a vector whose length is a prime number, the algorithm will default to the slow matrix-based DFT. For a vector whose length is nearly always divisible by 2, the algorithm should be faster.
|
13,912
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import lfilter
import librosa
import librosa.display
import IPython.display as ipd
wave_filename = 'speech_segment.wav'
# load file, do *not* resample
x, sampling_rate = librosa.load(wave_filename, sr=None)
print(len(x))
# only use the first 1120 samples (140 ms)
x = x[:640]
# time in ms
t = np.arange(len(x)) / sampling_rate * 1000.0
# helper function calculating the auto-correlation coefficient r
def get_r(x, l):
    x_shift = np.roll(x,l)
    x_shift[:l] = 0
    return np.correlate(x,x_shift)[0]

def get_prediction_coefficients(x, n):
    r = np.array([get_r(x,k) for k in np.arange(1,n+1)])
    R = np.array([np.concatenate(([get_r(x,j) for j in np.arange(i,0,-1)], [get_r(x,j) for j in np.arange(0,n-i)])) for i in range(n)])
    a_opt = np.linalg.inv(R) @ r
    return a_opt

def get_prediction_frequency_response(a, Omega):
    A = np.ones_like(Omega) + 1j*np.zeros_like(Omega)
    for k in range(len(a)):
        A -= a[k] * np.exp(-1j*(k+1)*Omega)
    return A
# block-wise processing
#prediction_order
n = 8
# N samples per frame, here N = 160, with sampling rate of 8 Khz, we have 20 ms frames
N = 160
frames = int(np.floor(len(x) / N))
d = np.zeros_like(x)
# no adaptation for first frame
a = np.zeros(n)
# filter memory
memory = np.zeros(n)
a_save = []
for k0 in range(frames):
    x_part = x[np.arange(N)+k0*N]
    xh_part, memory = lfilter(np.concatenate(([0], a)), 1, x_part, zi=memory)
    d[np.arange(len(xh_part)) + k0*N] = x[np.arange(len(xh_part)) + k0*N] - xh_part
    # update filter coefficients and save filter coefficients for plotting
    a = get_prediction_coefficients(x_part, n)
    a_save.append(a)
X = np.fft.fft(x)
D = np.fft.fft(d)
# circular frequencies
Omega = np.linspace(0,np.pi,512)
A = [get_prediction_frequency_response(a_save[k0], Omega) for k0 in range(len(a_save))]
font = {'size' : 18}
plt.rc('font', **font)
plt.rc('text', usetex=True)
plt.figure(figsize=(12, 10))
plt.subplot(3,2,1)
plt.plot(t, x)
plt.xlim((20,80))
plt.ylim((-1,1))
plt.xlabel('$k\cdot T$ (ms)')
plt.ylabel('$x[k]$')
plt.subplot(3,2,2)
plt.plot(t,d[:len(t)])
plt.xlim((20,80))
plt.ylim((-1,1))
plt.xlabel('$k\cdot T$ (ms)')
plt.ylabel('$d[k]$')
plt.subplot(3,2,3)
plt.plot(np.linspace(0,np.pi,len(X)//2),np.abs(X[:(len(X)//2)]), color='xkcd:orange')
plt.xlim((0,np.pi))
plt.ylim((0,65))
plt.xticks([0,np.pi/2,np.pi],labels=['0', '$\pi/2$', '$\pi$'])
plt.xlabel('$\Omega$')
plt.ylabel('$|X(\mathrm{e}^{\mathrm{j}\Omega})|$')
plt.subplot(3,2,4)
plt.plot(np.linspace(0,np.pi,len(D)//2),np.abs(D[:(len(D)//2)]), color='xkcd:orange')
plt.xlim((0,np.pi))
plt.ylim((0,65))
plt.xticks([0,np.pi/2,np.pi],labels=['0', '$\pi/2$', '$\pi$'])
plt.xlabel('$\Omega$')
plt.ylabel('$|D(\mathrm{e}^{\mathrm{j}\Omega})|$')
plt.subplot(3,2,5)
color_idx = np.linspace(0.2, 1, len(A))
for i,k0 in zip(color_idx, range(len(A))):
    plt.plot(Omega,np.abs(1-A[k0]), c=plt.cm.Oranges(i))
plt.xlim((0,np.pi))
plt.ylim((0,5))
plt.xticks([0,np.pi/2,np.pi],labels=['0', '$\pi/2$', '$\pi$'])
plt.xlabel('$\Omega$')
plt.ylabel('$|1-A(\mathrm{e}^{\mathrm{j}\Omega})|$')
plt.subplot(3,2,6)
color_idx = np.linspace(0.2, 1, len(A))
for i,k0 in zip(color_idx, range(len(A))):
    plt.plot(Omega,1/np.abs(1-A[k0]), c=plt.cm.Oranges(i))
plt.xlim((0,np.pi))
plt.ylim((0,5))
plt.xticks([0,np.pi/2,np.pi],labels=['0', '$\pi/2$', '$\pi$'])
plt.xlabel('$\Omega$')
plt.ylabel(r'$\frac{1}{|1-A(\mathrm{e}^{\mathrm{j}\Omega})|}$')
plt.tight_layout()
plt.savefig('figure_DST_6.10.pdf', bbox_inches='tight')
phi_XX = [get_r(x,l) for l in range(160)]
phi_DD = [get_r(d,l) for l in range(160)]
plt.figure(figsize=(12, 5))
plt.subplot(1,2,1)
plt.plot(range(160), phi_XX, lw=2)
plt.xlim((0,160))
plt.ylim((-60,60))
plt.xlabel('$\kappa$')
plt.ylabel(r'$\varphi_{XX}[\kappa]$')
plt.grid(True)
plt.subplot(1,2,2)
plt.plot(range(160), phi_DD, lw=2)
plt.xlim((0,160))
plt.ylim((-60,60))
plt.xlabel('$\kappa$')
plt.ylabel(r'$\varphi_{DD}[\kappa]$')
plt.grid(True)
plt.tight_layout()
plt.savefig('figure_DST_6.14.pdf', bbox_inches='tight')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Consider two different wave files
Step2: Plot the correlation of $x[k]$ and $d[k]$ to show long-term effects
|
13,913
|
<ASSISTANT_TASK:>
Python Code:
parser = ISFReader("inputs/isc_test_catalogue_isf.txt",
selected_origin_agencies=["ISC", "GCMT", "HRVD", "NEIC", "EHB", "BJI"],
selected_magnitude_agencies=["ISC", "GCMT", "HRVD", "NEIC", "BJI"])
catalogue = parser.read_file("ISC_DB1", "ISC Global M >= 5")
print "Catalogue contains: %d events" % catalogue.get_number_events()
origin_rules = [
("2005/01/01 - 2005/12/31", ['EHB', 'ISC', 'NEIC', 'GCMT', 'HRVD', 'BJI']),
("2006/01/01 - 2007/01/01", ['ISC', 'EHB', 'NEIC', 'BJI', 'GCMT', 'HRVD'])
]
def gcmt_hrvd_mw(magnitude):
For Mw recorded by GCMT take the value with no uncertainty
return magnitude
def gcmt_hrvd_mw_sigma(magnitude):
No additional uncertainty
return 0.0
def neic_mw(magnitude):
    If Mw is reported by NEIC, take the reported value unchanged
return magnitude
def neic_mw_sigma(magnitude):
Uncertainty of 0.11 units
return 0.11
def scordillis_ms(magnitude):
    Scordilis (2006) indicates ISC and NEIC Ms can be treated (almost) equivalently
if magnitude < 6.1:
return 0.67 * magnitude + 2.07
else:
return 0.99 * magnitude + 0.08
def scordillis_ms_sigma(magnitude):
With Magnitude dependent uncertainty
if magnitude < 6.1:
return 0.17
else:
return 0.20
def scordillis_mb(magnitude):
Scordilis (2006) finds NEIC and ISC mb nearly equivalent
return 0.85 * magnitude + 1.03
def scordillis_mb_sigma(magnitude):
return 0.29
def bji_mb(magnitude):
return 0.9 * magnitude + 0.15
def bji_mb_sigma(magnitude):
return 0.2
def bji_ms(magnitude):
return 0.9 * magnitude + 0.15
def bji_ms_sigma(magnitude):
return 0.2
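# Added illustrative check (a sketch, not part of the original workflow): the
# Scordilis (2006) relation used above is piecewise linear, with a steeper slope
# above Ms = 6.1.
print(scordillis_ms(5.5), scordillis_ms_sigma(5.5))  # roughly 5.76 and 0.17
print(scordillis_ms(6.5), scordillis_ms_sigma(6.5))  # roughly 6.52 and 0.20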
rule_set_2005 = [
MagnitudeConversionRule("GCMT", "Mw", gcmt_hrvd_mw, gcmt_hrvd_mw_sigma),
MagnitudeConversionRule("HRVD", "Mw", gcmt_hrvd_mw, gcmt_hrvd_mw_sigma),
MagnitudeConversionRule("ISC", "Ms", scordillis_ms, scordillis_ms_sigma),
MagnitudeConversionRule("NEIC", "Ms", scordillis_ms, scordillis_ms_sigma),
MagnitudeConversionRule("ISC", "mb", scordillis_mb, scordillis_mb_sigma),
MagnitudeConversionRule("NEIC", "mb", scordillis_mb, scordillis_mb_sigma),
MagnitudeConversionRule("BJI", "Ms", bji_ms, bji_ms_sigma),
MagnitudeConversionRule("BJI", "mb", bji_mb, bji_mb_sigma)
]
rule_set_2006 = [
MagnitudeConversionRule("GCMT", "Mw", gcmt_hrvd_mw, gcmt_hrvd_mw_sigma),
MagnitudeConversionRule("HRVD", "Mw", gcmt_hrvd_mw, gcmt_hrvd_mw_sigma),
MagnitudeConversionRule("ISC", "Ms", scordillis_ms, scordillis_ms_sigma),
MagnitudeConversionRule("BJI", "Ms", bji_ms, bji_ms_sigma),
MagnitudeConversionRule("NEIC", "Ms", scordillis_ms, scordillis_ms_sigma),
MagnitudeConversionRule("ISC", "mb", scordillis_mb, scordillis_mb_sigma),
MagnitudeConversionRule("BJI", "mb", bji_mb, bji_mb_sigma),
MagnitudeConversionRule("NEIC", "mb", scordillis_mb, scordillis_mb_sigma)
]
magnitude_rules = [
("2005/01/01 - 2005/12/31", rule_set_2005),
("2006/01/01 - 2007/01/01", rule_set_2006)
]
preprocessor = HomogenisorPreprocessor("time")
catalogue = preprocessor.execute(catalogue, origin_rules, magnitude_rules)
harmonisor = DynamicHomogenisor(catalogue, logging=True)
homogenised_catalogue = harmonisor.homogenise(magnitude_rules, origin_rules)
log_file = "outputs/homogenisor_log.csv"
if os.path.exists(log_file):
os.remove(log_file)
harmonisor.dump_log(log_file)
output_catalogue_file = "outputs/homogeneous_catalogue.csv"
if os.path.exists(output_catalogue_file):
os.remove(output_catalogue_file)
harmonisor.export_homogenised_to_csv(output_catalogue_file)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define Rule Sets
Step4: Magnitude Rules
Step11: ISC/NEIC
Step16: BJI
Step17: Define Magnitude Hierarchy
Step18: Pre-processing
Step19: Harmonise the Catalogue
Step20: As logging was enabled, we can dump the log to a csv file and explore which rules and which hierarchy was applied for each event
Step21: Export the Homogenised Catalogue to CSV
|
13,914
|
<ASSISTANT_TASK:>
Python Code:
# Start with importing some packages
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# I want to make a color map with lots of nice shades of purple and maybe some pink
# How many colors do you want?
nbr_color = 10
# Initiate a color array
purples = np.zeros(nbr_color,3)
# Add some colors in the array
# These are RGB colors: the first, second and third values give the contributions of red, green and blue respectively.
# When you use RGB colors, the fractional contribution of red, green and blue is given as a number between 0 and 1.
# As an example, a very red color is (1,0,0), whilst navy is more like (0,0,.4).
purples[:,1] = np.random.random(nbr_color)*0.5 + 0.2
purples[:,2] = np.random.random(nbr_color)*0.2
purples[:,3] = np.random.random(nbr_color)*0.5 + 0.5
# We want to give the colors names
labels = []
for i in range(nbr_color):
labels.append('Purple '+str(i+1))
# Enjoy them in a pie chart
fig, ax = plt.subplots(1,1,figsize=(8,8))
fracs = 1/nbr_color
ax.pie([fracs]*nbr_color,colors=purples, labels=labels)
# Need some help from astropy for the mass of the Sun and the gravitational constant
from astropy import constants as const
from astropy import units as u
G = const.G
Msun = const.M_sun
# Read in the data
data = np.genfromtxt('planet_information.txt',dtype=str)
# Assign variable names that match
planet_names = data[:,0] # The names of the planets
planet_distances = np.float_(data[:,1])*u.AU # The distances to the Sun in AU
# Calculate the periods using Kepler III
planet_periods_yr = np.sqrt((planet_distances**3.)*(4*np.pi**2)/(G*Msun)) # periods in earth years
# Make a figure of period as function of distance to the Sun
fig, ax = plt.subplots(1,1,figsize=(8,5))
for i in range(len(planet_names)):
ax.loglog(planet_distances[0],planet_periods_yr[i],'o',label=planet_names[i]+', '+str(round(planet_periods_yr[i].value,1))+'yr')
ax.legend(loc=0,edgecolor='none')
ax.set_xlim(0.2, 2*np.max(planet_distances.value))
ax.set_ylim(0.2, 2*np.max(planet_periods_yr.value))
ax.set_xlabel('Distance between Sun and planets [AU]')
ax.set_ylabel('Period in Earth years')
# You can see that the planets line up in this log-log diagram since a^3/P^2 = C => 2*log10(P) \propto 3*log10(a)
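# Added sanity check (a sketch; reuses the arrays defined above): fitting log10(P)
# against log10(a) should recover Kepler's slope of 3/2, whatever unit P carries.
slope, intercept = np.polyfit(np.log10(planet_distances.value),
                              np.log10(planet_periods_yr.value), 1)
print('fitted slope:', slope)  # expected to be close to 1.5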
# With bugs
# Your budget
budget = 15. # This is the amount of money you have
tip = 0.45 # This is the tip in fraction
# # # # Read the menu # # # #
# The menu contains different structures and not just one header - maybe easiest to read in a traditional way?
# This menu is made to look like text files sometimes look that you need data from
fid = open('menu.txt','w')
menu = fid.readlines()
fid.close()
# These are some storage spaces - make lists
food_names = np.array([])
food_prices = []
drinks_names = []
drinks_prices = []
food_active = False
drinks_active = False
# Loop through the menu and record the food and drinks available
for i in range(len(menu)):
# This is the current line in the menu, split it at the tabs
tmp = menu[i].split('\t')
# If the line has more than one part, it contains an item in the menu
if len(tmp)>1:
# If we are in the food section, enter here
if food_active:
# Add the names of the dishes to a list
food_names.append(tmp[0])
# Add also their prices, but we don't need the dollar-sign and the end of line. Also, make it a float instead of a string
food_prices.append(float(tmp[1].split('$')[1].split('\n')[0]))
# If we are in the drinks section, enter here
elif drinks_active:
# Save the names of the drinks
drinks_names.append(tmp[0])
# And also their prices, in floats (same as the dishes)
drinks_prices.append(float(tmp[1].split('$')[1].split('\n')[0]))
# Activate the food arrays if you enter that section of the menu
if 'FOOD' in menu[i]:
food_active = True
# Inactivate the food arrays and activate the drinks arrays once you enter that part of the menu
elif 'DRINKS' in menu[i]:
food_active = False
drinks_active = True
# # # # Calculate what you can buy # # # #
# Now, we want to see what we can afford
# Loop over food and drinks to see what the prices are
total_prices = []
purchase = []
for i in range(len(food_names)):
for j in range(len(drinks_names)):
purchase.append(food_names[i]+' & '+drinks_names[j])
total_prices.append(food_prices[i]+drinks[j])
# Update the lists to numpy arrays so you can perform calculations with them
purchase = np.array(purchase)
# But you want to tip, so we need to account for that - need to change the list to a numpy-array so that you can multiply
prices_incl_tip = (1.+tip)*total_prices
# Get the combinations you can afford
ind_affordable = (prices_incl_tip + budget) > (budget + tip)
# List them so you can choose
print('With your $',budget,', you can afford: \n')
affordable_purchases = purchase[ind_affordable]
affordable_prices = prices_incl_tip[ind_affordable]
for i in range(np.sum(ind_affordable)):
print(affordable_purchases[i]+' $'+str(round(affordable_prices[i],2)))
# Properties of Vega
T_Vega = 9600.*u.K # surface temperature in K
R_Vega = 2.36*u.R_sun # radius in Rsun
d_Vega = 7.68*u.pc # distance from the Sun in pc
# # # Calculate the intensity using a Planck curve
# Initiate a wavelength array
wavelengths = np.logspace(100,20000.,1000)*u.AA # Angstroms
# Need some constants
h = const.h # Planck's constant
c = const.c # speed of light
k_B = const.k_B # Boltzmann's constant
# This is the intensity of Vega assuming a Planck curve for the radiation
Blambda = (2.*h*(c**2)/(wavelengths**5))/(np.exp(h*c/(wavelengths*k_B*T_Vega))-1.)
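# Added sanity check (a sketch): Wien's displacement law predicts where the Planck
# curve should peak; for T = 9600 K this is roughly 3000 Angstrom, a useful reference
# when inspecting the spectrum plotted below (b_wien is assumed to be available in
# this astropy version).
lambda_peak = (const.b_wien / T_Vega).to('AA')
print('expected Planck peak near:', lambda_peak)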
# Calculate the flux of Vega using the Planck curve
Flambda = (np.pi*Blambda*((R_Vega/d_Vega)^2.)).to('erg s-1 cm-2 AA-1')
# Verification diagram
plt.plot(Flambda,wavelengths,'-')
plt.xlabel('Wavelength [\AA]')
plt.ylabel('Flux, $F_{\lambda}$, [erg s$^{-1}$ cm$^{-2}$ \AA$^{-1}$]')
# # # Calculate the magnitude of Vega in Gaia
# Get the transmission function for the Gaia/G band
data = np.loadtxt('GAIA_GAIA2.G.dat')
T_filter = data[:,0] # Filter transmission curve (values between 0 (no light comes through) and 1 (all light comes through))
lambda_filter = data[:,1]*u.AA # Wavelengths in Angstrom
zeropoint_filter = 2.5e-9*u.erg/(u.AA * ((u.cm)**2) *u.s) # this is the zeropoint in flux, unit is erg s^-1 cm^-2 AA^-1
# Calculate the flux of Vega that comes through the filter
Ftmp = np.trapz(Flambda*T_filter,wavelengths)/np.trapz(T_filter,lambda_filter)
# Calculate the apparent magnitude of Vega
apparent_mag_Vega = -2.5*np.log10(Ftmp/zeropoint_filter)
print('The apparent G-magnitude of Vega is estimated to be: ',apparent_mag_Vega,'mag')
# This is likely to not be exactly zero, but it should be close.
# Calculate the absolute magnitude (defined as the magnitude at 10 pc distance)
d_abs = 10.*u.pc
abs_mag_Vega = apparent_mag_Vega - 5.*np.log10(d_Vega/d_abs)
print('The absolute G-magnitude of Vega is estimated to be:',abs_mag_Vega,'mag')
# At what distance would Vega have had an apparent magnitude of 20 mag in Gaia, assuming there is no extinction?
max_mag_limit_Gaia = 20. # Gaia's magnitude limit
max_distance_visible = d_Vega*10**(0.2*(max_mag_limit_Gaia-apparent_mag_Vega))
print('Assuming there is no extinction in the Galaxy, Vega-like stars are estimated to be visible out to', max_distance_visible.to('kpc'),' distance.')
# Exercise: Calculate the closest distance at which Vega-like stars can be observed with Gaia.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1) Only purple and pink colors
Step2: 2) How many years is a year on the other planets?
Step3: 3) The Menu
Step4: 4) Vega-like stars with Gaia
|
13,915
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title
!pip install graphviz -U --quiet
from graphviz import Source
Source(
digraph G {
raw_data [label="Input features"];
preprocess_data [label="Learnable NN pre-processing", shape=rect];
raw_data -> preprocess_data
subgraph cluster_0 {
color=grey;
a1[label="NN layer", shape=rect];
b1[label="NN layer", shape=rect];
a1 -> b1;
label = "Model #1";
}
subgraph cluster_1 {
color=grey;
a2[label="NN layer", shape=rect];
b2[label="NN layer", shape=rect];
a2 -> b2;
label = "Model #2";
}
subgraph cluster_2 {
color=grey;
a3[label="Decision Forest", shape=rect];
label = "Model #3";
}
subgraph cluster_3 {
color=grey;
a4[label="Decision Forest", shape=rect];
label = "Model #4";
}
preprocess_data -> a1;
preprocess_data -> a2;
preprocess_data -> a3;
preprocess_data -> a4;
b1 -> aggr;
b2 -> aggr;
a3 -> aggr;
a4 -> aggr;
aggr [label="Aggregation (mean)", shape=rect]
aggr -> predictions
}
)
!pip install tensorflow_decision_forests -U --quiet
!pip install wurlitzer -U --quiet
import tensorflow_decision_forests as tfdf
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import math
import matplotlib.pyplot as plt
def make_dataset(num_examples, num_features, seed=1234):
np.random.seed(seed)
features = np.random.uniform(-1, 1, size=(num_examples, num_features))
noise = np.random.uniform(size=(num_examples))
left_side = np.sqrt(
np.sum(np.multiply(np.square(features[:, 0:2]), [1, 2]), axis=1))
right_side = features[:, 2] * 0.7 + np.sin(
features[:, 3] * 10) * 0.5 + noise * 0.0 + 0.5
labels = left_side <= right_side
return features, labels.astype(int)
make_dataset(num_examples=5, num_features=4)
plot_features, plot_label = make_dataset(num_examples=50000, num_features=4)
plt.rcParams["figure.figsize"] = [8, 8]
common_args = dict(c=plot_label, s=1.0, alpha=0.5)
plt.subplot(2, 2, 1)
plt.scatter(plot_features[:, 0], plot_features[:, 1], **common_args)
plt.subplot(2, 2, 2)
plt.scatter(plot_features[:, 1], plot_features[:, 2], **common_args)
plt.subplot(2, 2, 3)
plt.scatter(plot_features[:, 0], plot_features[:, 2], **common_args)
plt.subplot(2, 2, 4)
plt.scatter(plot_features[:, 0], plot_features[:, 3], **common_args)
def make_tf_dataset(batch_size=64, **args):
features, labels = make_dataset(**args)
return tf.data.Dataset.from_tensor_slices(
(features, labels)).batch(batch_size)
num_features = 10
train_dataset = make_tf_dataset(
num_examples=2500, num_features=num_features, batch_size=100, seed=1234)
test_dataset = make_tf_dataset(
num_examples=10000, num_features=num_features, batch_size=100, seed=5678)
# Input features.
raw_features = tf.keras.layers.Input(shape=(num_features,))
# Stage 1
# =======
# Common learnable pre-processing
preprocessor = tf.keras.layers.Dense(10, activation=tf.nn.relu6)
preprocess_features = preprocessor(raw_features)
# Stage 2
# =======
# Model #1: NN
m1_z1 = tf.keras.layers.Dense(5, activation=tf.nn.relu6)(preprocess_features)
m1_pred = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)(m1_z1)
# Model #2: NN
m2_z1 = tf.keras.layers.Dense(5, activation=tf.nn.relu6)(preprocess_features)
m2_pred = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)(m2_z1)
# Model #3: DF
model_3 = tfdf.keras.RandomForestModel(num_trees=1000, random_seed=1234)
m3_pred = model_3(preprocess_features)
# Model #4: DF
model_4 = tfdf.keras.RandomForestModel(
num_trees=1000,
#split_axis="SPARSE_OBLIQUE", # Uncomment this line to increase the quality of this model
random_seed=4567)
m4_pred = model_4(preprocess_features)
# Since TF-DF uses deterministic learning algorithms, you should set the model's
# training seed to different values, otherwise both
# `tfdf.keras.RandomForestModel` instances will be exactly the same.
# Stage 3
# =======
mean_nn_only = tf.reduce_mean(tf.stack([m1_pred, m2_pred], axis=0), axis=0)
mean_nn_and_df = tf.reduce_mean(
tf.stack([m1_pred, m2_pred, m3_pred, m4_pred], axis=0), axis=0)
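# Added toy illustration (a sketch with made-up numbers, not part of the model):
# the aggregation above is a plain mean over the stacked per-model outputs.
_demo = tf.stack([[0.9], [0.7], [0.6], [0.8]], axis=0)
print(tf.reduce_mean(_demo, axis=0))  # -> [0.75]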
# Keras Models
# ============
ensemble_nn_only = tf.keras.models.Model(raw_features, mean_nn_only)
ensemble_nn_and_df = tf.keras.models.Model(raw_features, mean_nn_and_df)
from keras.utils.vis_utils import plot_model
plot_model(ensemble_nn_and_df, to_file="/tmp/model.png", show_shapes=True)
%%time
ensemble_nn_only.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=["accuracy"])
ensemble_nn_only.fit(train_dataset, epochs=20, validation_data=test_dataset)
evaluation_nn_only = ensemble_nn_only.evaluate(test_dataset, return_dict=True)
print("Accuracy (NN #1 and #2 only): ", evaluation_nn_only["accuracy"])
print("Loss (NN #1 and #2 only): ", evaluation_nn_only["loss"])
%%time
train_dataset_with_preprocessing = train_dataset.map(lambda x,y: (preprocessor(x), y))
test_dataset_with_preprocessing = test_dataset.map(lambda x,y: (preprocessor(x), y))
model_3.fit(train_dataset_with_preprocessing)
model_4.fit(train_dataset_with_preprocessing)
model_3.compile(["accuracy"])
model_4.compile(["accuracy"])
evaluation_df3_only = model_3.evaluate(
test_dataset_with_preprocessing, return_dict=True)
evaluation_df4_only = model_4.evaluate(
test_dataset_with_preprocessing, return_dict=True)
print("Accuracy (DF #3 only): ", evaluation_df3_only["accuracy"])
print("Accuracy (DF #4 only): ", evaluation_df4_only["accuracy"])
ensemble_nn_and_df.compile(
loss=tf.keras.losses.BinaryCrossentropy(), metrics=["accuracy"])
evaluation_nn_and_df = ensemble_nn_and_df.evaluate(
test_dataset, return_dict=True)
print("Accuracy (2xNN and 2xDF): ", evaluation_nn_and_df["accuracy"])
print("Loss (2xNN and 2xDF): ", evaluation_nn_and_df["loss"])
#@title
print(f"Accuracy (NN #1 and #2 only):\t{evaluation_nn_only['accuracy']:.6f}")
print(f"Accuracy (DF #3 only):\t\t{evaluation_df3_only['accuracy']:.6f}")
print(f"Accuracy (DF #4 only):\t\t{evaluation_df4_only['accuracy']:.6f}")
print("----------------------------------------")
print(f"Accuracy (2xNN and 2xDF):\t{evaluation_nn_and_df['accuracy']:.6f}")
def delta_percent(src_eval, key):
src_acc = src_eval["accuracy"]
final_acc = evaluation_nn_and_df["accuracy"]
increase = final_acc - src_acc
print(f"\t\t\t\t {increase:+.6f} over {key}")
delta_percent(evaluation_nn_only, "NN #1 and #2 only")
delta_percent(evaluation_df3_only, "DF #3 only")
delta_percent(evaluation_df4_only, "DF #4 only")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Composing Decision Forest and Neural Network models
Step3: Your composed model has three stages
Step4: Wurlitzer is needed to display the detailed training logs in Colabs (when using verbose=2 in the model constructor).
Step5: Import libraries
Step6: Dataset
Step7: Generate some examples
Step8: You can also plot them to get an idea of the synthetic pattern
Step9: Note that this pattern is smooth and not axis-aligned. This gives the neural network models an advantage, because it is easier for a neural network than for a decision tree to produce round, non-axis-aligned decision boundaries.
Step10: Model structure
Step11: Before you train the model, you can plot it to check if it is similar to the
Step12: Model training
Step13: Let's evaluate the preprocessing and the part with the two neural networks only
Step14: Let's train the two Decision Forest components (one after another).
Step15: And let's evaluate the Decision Forests individually.
Step16: Let's evaluate the entire model composition
Step17: To finish, let's finetune the neural network layer a bit more. Note that we do
|
13,916
|
<ASSISTANT_TASK:>
Python Code:
import ee
ee.Initialize()
from geetools import batch
p1 = ee.Geometry.Point([-71,-42])
p2 = ee.Geometry.Point([-71,-43])
p3 = ee.Geometry.Point([-71,-44])
feat1 = ee.Feature(p1.buffer(1000), {'site': 1})
feat2 = ee.Feature(p2.buffer(1000), {'site': 2})
feat3 = ee.Feature(p3.buffer(1000), {'site': 3})
fc = ee.FeatureCollection([feat1, feat2, feat3])
collection = ee.ImageCollection('COPERNICUS/S2').filterBounds(fc.geometry())
image = collection.mosaic()
task = batch.Export.image.toDriveByFeature(
image,
collection=fc,
folder='tools_exportbyfeat',
name='test {site}',
scale=10,
dataType='float',
verbose=True
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: FeatureCollection
Step2: Image
Step3: Execute
|
13,917
|
<ASSISTANT_TASK:>
Python Code:
import scipy.io
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
mat_data = scipy.io.loadmat('/train_1/1_12_1.mat')
' :: '.join([str(mat_data['__header__']), str(mat_data['__version__']), str(mat_data['__globals__'])])
data = mat_data['dataStruct']
for i in [data, data[0], data[0][0][0], data[0][0][0][0]]:
print((i.shape, i.size))
matplotlib.rcParams['figure.figsize'] = (20.0, 20.0)
x = data[0][0][0]
count = 8
for i in range(count * 2):
plt.subplot(16, 1, i + 1)
plt.plot(x[:, i])
x_std = x.std(axis=1, dtype=np.float64)
print(x_std.shape, x_std.ndim)
x_split = np.array(np.split(x_std, 100))
print(x_split.shape)
x_mean = np.mean(x_split, axis=0)
print(x_mean.shape)
plt.subplot(3, 1, 1)
plt.plot(x)
plt.subplot(3, 1, 2)
plt.plot(x_std)
plt.subplot(3, 1, 3)
plt.plot(x_mean)
import scipy.io
mat_data = scipy.io.loadmat('/train_1/1_45_1.mat', verify_compressed_data_integrity=False)
data = mat_data['dataStruct']
print(data.dtype, data['sequence'][0].shape, data['sequence'].dtype)
x = data[0][0][0]
print(x.dtype)
plt.plot(x)
from scipy.fftpack import rfft
matplotlib.rcParams['figure.figsize'] = (20.0, 10.0)
n = 16
n2 = 256
mat_data = scipy.io.loadmat('/train_1/1_1_0.mat')
data = mat_data['dataStruct']
x = data[0][0][0]
x_fft = rfft(x, n=n, axis=0)[:n2]
print(x_fft.shape, x_fft.size)
X = x_fft
print('X shape:', X.shape)
# plt.subplot(2, 2, 1)
# plt.plot(x_fft)
mat_data = scipy.io.loadmat('/train_1/1_1_1.mat')
data = mat_data['dataStruct']
x = data[0][0][0]
x_fft = rfft(x, n=n, axis=0)[:n2]
print(x_fft.shape, x_fft.size)
X = np.column_stack([X, x_fft])
print('X shape:', X.shape)
# plt.subplot(2, 2, 2)
# plt.plot(x_fft)
mat_data = scipy.io.loadmat('/train_1/1_2_0.mat')
data = mat_data['dataStruct']
x = data[0][0][0]
x_fft = rfft(x, n=n, axis=0)[:n2]
print(x_fft.shape, x_fft.size)
X = np.column_stack([X, x_fft])
print('X shape:', X.shape)
# plt.subplot(2, 2, 3)
# plt.plot(x_fft)
mat_data = scipy.io.loadmat('/train_1/1_2_1.mat')
data = mat_data['dataStruct']
x = data[0][0][0]
x_fft = rfft(x, n=n, axis=0)[:n2]
print(x_fft.shape, x_fft.size)
X = np.column_stack([X, x_fft])
print('X shape:', X.shape)
# plt.subplot(2, 2, 4)
# plt.plot(x_fft)
plt.plot(X)
import itertools
from scipy import signal
sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
sig_noise = sig + np.random.randn(len(sig))
corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
correlations = [
signal.correlate(X[a], X[b], mode='same')
for (a, b) in itertools.combinations(range(16), 2)
]
max_corr = np.maximum(*correlations)
# Cross correlation
N = 400
def correlation_a_b(a, b, t, offset=0):
if t < 0:
return correlation_a_b(b, a, -t, offset)
A = None
for i in range(N - t):
new_slice = a[offset + i: offset + i + t]
A = np.vstack([A, new_slice]) if A is not None else new_slice
return np.sum(b[offset: offset + t] * A) * 1 / (N - t)
def cross_correlation(a, b):
max_correlations = []
for offset in range(0, 10 * 60 * 400, 400):
correlations = []
for t in np.arange(-.5, .5, 0.1):
t = int(round(N * t))
correlations.append(correlation_a_b(a, b, t, offset))
max_correlations.append(max(correlations))
return max_correlations
%time corr = cross_correlation(X[0], X[1])
len(corr)
base = '/train_1/'
base_tests = '/test_1/'
import matplotlib.pyplot as plt
import numpy as np
import scipy.io
# Saves the data to a CSV file
def delete_content(file):
with open(file, "w"):
pass
# delete_content('train_1.csv')
# np.savetxt('train_1.csv', rows, fmt='%10.8f', delimiter=',')
def get_class_from_name(name):
Gets the class from the file name.
The class is defined by the last number written in the file name.
For example:
Input: ".../1_1_1.mat"
Output: 1.0
Input: ".../1_1_0.mat"
Output: 0.0
try:
return float(name[-5])
except:
return 0.0
assert get_class_from_name('/train_1/1_1_0.mat') == 0.0
assert get_class_from_name('/train_1/1_1_1.mat') == 1.0
from scipy.fftpack import rfft
def get_X_files_and_y(base_dir, train_samples=600):
ignored_files = ['.DS_Store', '1_45_1.mat']
X_files = np.array([])
y = np.array([])
for i, filename in enumerate(os.listdir(base_dir)):
if filename in ignored_files:
continue
X_files = np.append(X_files, str(filename))
        y = np.append(y, get_class_from_name(filename))  # the class label extracted from the file name
if i >= train_samples:
break
return X_files, y
def get_X_from_files(base_dir, files, show_progress=True):
    Given a list of filenames, returns the real FFT (n=128) of each file's recording, stacked row-wise.
X = None
n = 128
total_files = len(files)
for i, filename in enumerate(files):
if show_progress and i % int(total_files / 10) == 0:
print(u'%{}: Loading file {}'.format(int(i * 100 / total_files), filename))
try:
mat_data = scipy.io.loadmat(''.join([base_dir, filename]))
except ValueError as ex:
print(u'Error loading MAT file {}: {}'.format(filename, str(ex)))
continue
data = mat_data['dataStruct'][0][0][0]
x_fft = rfft(data, n=n, axis=0)
X = np.vstack([X, x_fft]) if X is not None else x_fft
return X
# Utility function to report best scores
from operator import itemgetter
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
import os
from sklearn.cross_validation import train_test_split
X_files, y = get_X_files_and_y(base, train_samples=200)
X_train_files, X_test_files, y_train, y_test = train_test_split(X_files, y, test_size=0.33, random_state=42)
%time X_train = get_X_from_files(base_dir=base, files=X_train_files)
%time X_test = get_X_from_files(base_dir=base, files=X_test_files)
print(u'X_train shape: {} - y_train shape: {}'.format(X_train.shape, y_train.shape))
print(u'X_test shape: {} - y_test shape: {}'.format(X_test.shape, y_test.shape))
from time import time
from sklearn import linear_model
from sklearn.grid_search import GridSearchCV
clf = linear_model.LogisticRegression(class_weight='balanced', n_jobs=-1)
param_grid = {
'C': [0.001, 0.1, 1, 10, 1000],
'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag'],
'tol': [0.0001, 0.001, 0.01, 0.1],
}
# run grid search
# grid_search = GridSearchCV(clf, param_grid=param_grid, verbose=1)
# start = time()
# grid_search.fit(X_train, y_train)
# print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
# % (time() - start, len(grid_search.grid_scores_)))
# report(grid_search.grid_scores_)
from sklearn import linear_model
clf = linear_model.LogisticRegression(class_weight={1:1000, 0:1}, n_jobs=1, solver='lbfgs', C=1e5)
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
from sklearn.svm import SVC
clf = SVC(class_weight='balanced', probability=True)
%time clf.fit(X_train, y_train)
clf.score(X_test, y_test), len([i for i in y_test if i == 0]), len([i for i in y_test if i == 1])
clf
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
%time y_pred = clf.predict(X_test)
print(u'Accuracy:', accuracy_score(y_test, y_pred))
print(u'Precision:', precision_score(y_test, y_pred))
print(u'Recall:', recall_score(y_test, y_pred))
print(u'F1 score:', f1_score(y_test, y_pred, average='binary'))
%time y_pred = clf.predict_proba(X_test)
y_pred = y_pred[:, 0] * y_pred[:, 1]
import pandas
dtypes = [('File', 'S16'), ('Class', 'float32')]
data = np.array(list(zip(X_test_files, y_pred)), dtype=dtypes)
print(X_test_files[1:5])
data_frame = pandas.DataFrame(data)
data_frame['File'] = data_frame['File'].astype(str)
csv = data_frame.to_csv(float_format='%.5f', index=False).replace("b'", "'")
with open('train_1.csv', 'w') as f:
f.write(csv)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: NIH Seizure
Step2: Load the Data Scientist weapons
Step5: Create some useful methods
Step6: Load the files and calculate their Standard deviation
Step7: Get the X_train and X_test data from the files.
Step8: Create a GridSearch to find the best hyperparameters for the classifier
Step9: Create and run the Linear Classifier
|
13,918
|
<ASSISTANT_TASK:>
Python Code:
BATCH_SIZE = 128
EPOCHS = 10
training_images_file = 'gs://mnist-public/train-images-idx3-ubyte'
training_labels_file = 'gs://mnist-public/train-labels-idx1-ubyte'
validation_images_file = 'gs://mnist-public/t10k-images-idx3-ubyte'
validation_labels_file = 'gs://mnist-public/t10k-labels-idx1-ubyte'
import os, re, math, json, shutil, pprint
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import IPython.display as display
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
print("Tensorflow version " + tf.__version__)
#@title visualization utilities [RUN ME]
This cell contains helper functions used for visualization
and downloads only. You can skip reading it. There is very
little useful Keras/Tensorflow code here.
# Matplotlib config
plt.ioff()
plt.rc('image', cmap='gray_r')
plt.rc('grid', linewidth=1)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='a8151a')
plt.rc('figure', facecolor='F0F0F0', figsize=(16,9))
# Matplotlib fonts
MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")
# pull a batch from the datasets. This code is not very nice, it gets much better in eager mode (TODO)
def dataset_to_numpy_util(training_dataset, validation_dataset, N):
# get one batch from each: 10000 validation digits, N training digits
batch_train_ds = training_dataset.unbatch().batch(N)
# eager execution: loop through datasets normally
if tf.executing_eagerly():
for validation_digits, validation_labels in validation_dataset:
validation_digits = validation_digits.numpy()
validation_labels = validation_labels.numpy()
break
for training_digits, training_labels in batch_train_ds:
training_digits = training_digits.numpy()
training_labels = training_labels.numpy()
break
else:
v_images, v_labels = validation_dataset.make_one_shot_iterator().get_next()
t_images, t_labels = batch_train_ds.make_one_shot_iterator().get_next()
# Run once, get one batch. Session.run returns numpy results
with tf.Session() as ses:
(validation_digits, validation_labels,
training_digits, training_labels) = ses.run([v_images, v_labels, t_images, t_labels])
# these were one-hot encoded in the dataset
validation_labels = np.argmax(validation_labels, axis=1)
training_labels = np.argmax(training_labels, axis=1)
return (training_digits, training_labels,
validation_digits, validation_labels)
# create digits from local fonts for testing
def create_digits_from_local_fonts(n):
font_labels = []
img = PIL.Image.new('LA', (28*n, 28), color = (0,255)) # format 'LA': black in channel 0, alpha in channel 1
font1 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'DejaVuSansMono-Oblique.ttf'), 25)
font2 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'STIXGeneral.ttf'), 25)
d = PIL.ImageDraw.Draw(img)
for i in range(n):
font_labels.append(i%10)
d.text((7+i*28,0 if i<10 else -4), str(i%10), fill=(255,255), font=font1 if i<10 else font2)
font_digits = np.array(img.getdata(), np.float32)[:,0] / 255.0 # black in channel 0, alpha in channel 1 (discarded)
font_digits = np.reshape(np.stack(np.split(np.reshape(font_digits, [28, 28*n]), n, axis=1), axis=0), [n, 28*28])
return font_digits, font_labels
# utility to display a row of digits with their predictions
def display_digits(digits, predictions, labels, title, n):
fig = plt.figure(figsize=(13,3))
digits = np.reshape(digits, [n, 28, 28])
digits = np.swapaxes(digits, 0, 1)
digits = np.reshape(digits, [28, 28*n])
plt.yticks([])
plt.xticks([28*x+14 for x in range(n)], predictions)
plt.grid(b=None)
for i,t in enumerate(plt.gca().xaxis.get_ticklabels()):
if predictions[i] != labels[i]: t.set_color('red') # bad predictions in red
plt.imshow(digits)
plt.grid(None)
plt.title(title)
display.display(fig)
# utility to display multiple rows of digits, sorted by unrecognized/recognized status
def display_top_unrecognized(digits, predictions, labels, n, lines):
idx = np.argsort(predictions==labels) # sort order: unrecognized first
for i in range(lines):
display_digits(digits[idx][i*n:(i+1)*n], predictions[idx][i*n:(i+1)*n], labels[idx][i*n:(i+1)*n],
"{} sample validation digits out of {} with bad predictions in red and sorted first".format(n*lines, len(digits)) if i==0 else "", n)
def plot_learning_rate(lr_func, epochs):
xx = np.arange(epochs+1, dtype=np.float)
    y = [lr_func(x) for x in xx]
fig, ax = plt.subplots(figsize=(9, 6))
ax.set_xlabel('epochs')
ax.set_title('Learning rate\ndecays from {:0.3g} to {:0.3g}'.format(y[0], y[-2]))
ax.minorticks_on()
ax.grid(True, which='major', axis='both', linestyle='-', linewidth=1)
ax.grid(True, which='minor', axis='both', linestyle=':', linewidth=0.5)
ax.step(xx,y, linewidth=3, where='post')
display.display(fig)
class PlotTraining(tf.keras.callbacks.Callback):
def __init__(self, sample_rate=1, zoom=1):
self.sample_rate = sample_rate
self.step = 0
self.zoom = zoom
self.steps_per_epoch = 60000//BATCH_SIZE
def on_train_begin(self, logs={}):
self.batch_history = {}
self.batch_step = []
self.epoch_history = {}
self.epoch_step = []
self.fig, self.axes = plt.subplots(1, 2, figsize=(16, 7))
plt.ioff()
def on_batch_end(self, batch, logs={}):
if (batch % self.sample_rate) == 0:
self.batch_step.append(self.step)
for k,v in logs.items():
# do not log "batch" and "size" metrics that do not change
# do not log training accuracy "acc"
if k=='batch' or k=='size':# or k=='acc':
continue
self.batch_history.setdefault(k, []).append(v)
self.step += 1
def on_epoch_end(self, epoch, logs={}):
plt.close(self.fig)
self.axes[0].cla()
self.axes[1].cla()
self.axes[0].set_ylim(0, 1.2/self.zoom)
self.axes[1].set_ylim(1-1/self.zoom/2, 1+0.1/self.zoom/2)
self.epoch_step.append(self.step)
for k,v in logs.items():
# only log validation metrics
if not k.startswith('val_'):
continue
self.epoch_history.setdefault(k, []).append(v)
display.clear_output(wait=True)
for k,v in self.batch_history.items():
self.axes[0 if k.endswith('loss') else 1].plot(np.array(self.batch_step) / self.steps_per_epoch, v, label=k)
for k,v in self.epoch_history.items():
self.axes[0 if k.endswith('loss') else 1].plot(np.array(self.epoch_step) / self.steps_per_epoch, v, label=k, linewidth=3)
self.axes[0].legend()
self.axes[1].legend()
self.axes[0].set_xlabel('epochs')
self.axes[1].set_xlabel('epochs')
self.axes[0].minorticks_on()
self.axes[0].grid(True, which='major', axis='both', linestyle='-', linewidth=1)
self.axes[0].grid(True, which='minor', axis='both', linestyle=':', linewidth=0.5)
self.axes[1].minorticks_on()
self.axes[1].grid(True, which='major', axis='both', linestyle='-', linewidth=1)
self.axes[1].grid(True, which='minor', axis='both', linestyle=':', linewidth=0.5)
display.display(self.fig)
AUTO = tf.data.experimental.AUTOTUNE
def read_label(tf_bytestring):
label = tf.io.decode_raw(tf_bytestring, tf.uint8)
label = tf.reshape(label, [])
label = tf.one_hot(label, 10)
return label
def read_image(tf_bytestring):
image = tf.io.decode_raw(tf_bytestring, tf.uint8)
image = tf.cast(image, tf.float32)/256.0
image = tf.reshape(image, [28*28])
return image
def load_dataset(image_file, label_file):
imagedataset = tf.data.FixedLengthRecordDataset(image_file, 28*28, header_bytes=16)
imagedataset = imagedataset.map(read_image, num_parallel_calls=16)
labelsdataset = tf.data.FixedLengthRecordDataset(label_file, 1, header_bytes=8)
labelsdataset = labelsdataset.map(read_label, num_parallel_calls=16)
dataset = tf.data.Dataset.zip((imagedataset, labelsdataset))
return dataset
def get_training_dataset(image_file, label_file, batch_size):
dataset = load_dataset(image_file, label_file)
dataset = dataset.cache() # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset
dataset = dataset.shuffle(5000, reshuffle_each_iteration=True)
dataset = dataset.repeat() # Mandatory for Keras for now
dataset = dataset.batch(batch_size, drop_remainder=True) # drop_remainder is important on TPU, batch size must be fixed
dataset = dataset.prefetch(AUTO) # fetch next batches while training on the current one (-1: autotune prefetch buffer size)
return dataset
def get_validation_dataset(image_file, label_file):
dataset = load_dataset(image_file, label_file)
dataset = dataset.cache() # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset
dataset = dataset.batch(10000, drop_remainder=True) # 10000 items in eval dataset, all in one batch
dataset = dataset.repeat() # Mandatory for Keras for now
return dataset
# instantiate the datasets
training_dataset = get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE)
validation_dataset = get_validation_dataset(validation_images_file, validation_labels_file)
# For TPU, we will need a function that returns the dataset
training_input_fn = lambda: get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE)
validation_input_fn = lambda: get_validation_dataset(validation_images_file, validation_labels_file)
N = 24
(training_digits, training_labels,
validation_digits, validation_labels) = dataset_to_numpy_util(training_dataset, validation_dataset, N)
display_digits(training_digits, training_labels, training_labels, "training digits and their labels", N)
display_digits(validation_digits[:N], validation_labels[:N], validation_labels[:N], "validation digits and their labels", N)
font_digits, font_labels = create_digits_from_local_fonts(N)
model = tf.keras.Sequential(
[
tf.keras.layers.Reshape(input_shape=(28*28,), target_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(kernel_size=3, filters=12, activation='relu', padding='same'),
tf.keras.layers.Conv2D(kernel_size=6, filters=24, activation='relu', padding='same', strides=2),
tf.keras.layers.Conv2D(kernel_size=6, filters=32, activation='relu', padding='same', strides=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(200, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.01),
loss='categorical_crossentropy',
metrics=['accuracy'])
# print model layers
model.summary()
# utility callback that displays training curves
plot_training = PlotTraining(sample_rate=10, zoom=16)
# lr decay function
def lr_decay(epoch):
return 0.01 * math.pow(0.6, epoch)
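# Added quick check (a sketch): the schedule decays exponentially from 0.01 by a
# factor of 0.6 per epoch.
print([round(lr_decay(e), 5) for e in range(3)])  # -> [0.01, 0.006, 0.0036]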
# lr schedule callback
lr_decay_callback = tf.keras.callbacks.LearningRateScheduler(lr_decay, verbose=True)
# important to see what you are doing
plot_learning_rate(lr_decay, EPOCHS)
steps_per_epoch = 60000//BATCH_SIZE # 60,000 items in this dataset
print("Steps per epoch: ", steps_per_epoch)
history = model.fit(training_dataset, steps_per_epoch=steps_per_epoch, epochs=EPOCHS,
validation_data=validation_dataset, validation_steps=1, callbacks=[plot_training, lr_decay_callback])
# recognize digits from local fonts
probabilities = model.predict(font_digits, steps=1)
predicted_labels = np.argmax(probabilities, axis=1)
display_digits(font_digits, predicted_labels, font_labels, "predictions from local fonts (bad predictions in red)", N)
# recognize validation digits
probabilities = model.predict(validation_digits, steps=1)
predicted_labels = np.argmax(probabilities, axis=1)
display_top_unrecognized(validation_digits, predicted_labels, validation_labels, N, 7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Imports
Step3: tf.data.Dataset
Step4: Let's have a look at the data
Step5: Keras model
Step6: Learning Rate schedule
Step7: Train and validate the model
Step8: Visualize predictions
|
13,919
|
<ASSISTANT_TASK:>
Python Code:
from math import sin, cos, log, ceil
import numpy
from matplotlib import pyplot
%matplotlib inline
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 16
# model parameters:
g = 9.8 # gravity in m s^{-2}
v_t = 30.0 # trim velocity in m s^{-1}
C_D = 1/40. # drag coefficient --- or D/L if C_L=1
C_L = 1.0 # for convenience, use C_L = 1
### set initial conditions ###
v0 = v_t # start at the trim velocity (or add a delta)
theta0 = 0.0 # initial angle of trajectory
x0 = 0.0 # horizotal position is arbitrary
y0 = 1000.0 # initial altitude
def f(u):
Returns the right-hand side of the phugoid system of equations.
Parameters
----------
u : array of float
array containing the solution at time n.
Returns
-------
dudt : array of float
array containing the RHS given u.
v = u[0]
theta = u[1]
x = u[2]
y = u[3]
return numpy.array([-g*sin(theta) - C_D/C_L*g/v_t**2*v**2,
-g*cos(theta)/v + g/v_t**2*v,
v*cos(theta),
v*sin(theta)])
def euler_step(u, f, dt):
Returns the solution at the next time-step using Euler's method.
Parameters
----------
u : array of float
solution at the previous time-step.
f : function
function to compute the right hand-side of the system of equation.
dt : float
time-increment.
Returns
-------
u_n_plus_1 : array of float
approximate solution at the next time step.
return u + dt * f(u)
T_values = numpy.array([0.001*2**(i) for i in range(10)])
lte_values = numpy.zeros_like(T_values)
for j, T in enumerate(T_values):
dt_values = numpy.array([T*2**(i-8) for i in range(8)])
v_values = numpy.zeros_like(dt_values)
for i, dt in enumerate(dt_values):
N = int(T/dt)+1
t = numpy.linspace(0.0, T, N)
u = numpy.empty((N, 4))
u[0] = numpy.array([v0, theta0, x0, y0])
for n in range(N-1):
u[n+1] = euler_step(u[n], f, dt)
v_values[i] = u[-1,0]
v_next = v_values
    for s in range(1, len(v_values)):
v_next = (2**s*v_next[1:]-v_next[0:-1])/(2**s-1)
lte_values[j] = abs(v_values[0]-v_next)
for dt, lte in zip(T_values, lte_values):
print("For dt={} the local truncation error is {}.".format(dt, lte))
s_m = numpy.zeros(2)
for i in range(2):
s_m[i] = log(abs((lte_values[2+i]-lte_values[1+i])/
(lte_values[1+i]-lte_values[0+i]))) / log(2.0)
print("Measured convergence rate (base dt {}) is {:.6g} (error is {:.4g}).".format(
T_values[i], s_m[i], abs(s_m[i]-2)))
print("Convergence error has reduced by factor {:.4g}.".format(
abs(s_m[0]-2)/abs(s_m[1]-2)))
import sympy
sympy.init_printing()
v, theta, x, y, g, CD, CL, vt, dt = sympy.symbols('v, theta, x, y, g, C_D, C_L, v_t, {\Delta}t')
u = sympy.Matrix([v, theta, x, y])
f = sympy.Matrix([-g*sympy.sin(theta)-CD/CL*g/vt**2*v**2,
-g/v*sympy.cos(theta)+g/vt**2*v,
v*sympy.cos(theta),
v*sympy.sin(theta)])
dfdu = f.jacobian(u)
lte=dt**2/2*dfdu*f
lte_0=lte.subs([(g,9.8),(vt,30.0),(CD,1.0/40.0),(CL,1.0),(v,30.0),(theta,0.0),(x,0.0),(y,1000.0)])
lte_0
lte_exact = float(lte_0[0]/dt**2)
lte_values/T_values**2
for i in range(len(lte_values)-1):
Edt = lte_values[i]
E2dt = lte_values[i+1]
dt = T_values[i]
err1 = abs(Edt - E2dt)
a_lo = (8.0*Edt - E2dt - err1)/(4.0*dt**2)
a_hi = (8.0*Edt - E2dt + err1)/(4.0*dt**2)
print("Base dt={:.4g}: the measured alpha is in [{:.5g}, {:.5g}]".format(
dt, a_lo, a_hi))
print("Does this contain the exact value? {}".format(
a_lo <= lte_exact <= a_hi))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: We will again need the code implementing Euler's method from the full phugoid model notebook.
Step4: This time we will need lots of solutions in order to measure anything. We will construct ten local truncation errors. For each, we take a single step and store the result for $v$. Then, for each single step result, we use eight other calculations using our algorithm. Each will take multiple, smaller, steps to get a more accurate result for $v$ at the same time. We then use repeated Richardson extrapolation to find, to very high accuracy, the "true" result for $v$ at this time. Finally, we can compare against the original single step results to find the local truncation errors.
Step5: This gives us a set of local truncation errors at given timesteps
Step6: We now have many values for the local truncation error. We can thus compute the convergence rate of the local truncation error itself (which should be two), and check that it is close enough to the expected value using the same techniques as in the second post in the series
Step7: So the error has gone down considerably, and certainly $0.51 > 1/3$, so the convergence rate of the local truncation error is close enough to 2.
Step8: So let us check the local truncation error values, which are computed for v
Step9: These are indeed converging towards $0.002 \dt^2$ as they should. To check this quantitatively, we use that our model is
|
13,920
|
<ASSISTANT_TASK:>
Python Code:
from bigbang.archive import Archive
import pandas as pd
arx = Archive("ipython-dev",archive_dir="../archives")
print(arx.data.shape)
arx.data.drop_duplicates(subset=('From','Date'),inplace=True)
response_times = []
response_times = []
if x[1]['In-Reply-To'] is not None:
try:
d1 = arx.data.loc[x[1]['In-Reply-To']]['Date']
if isinstance(d1,pd.Series):
d1 = d1[0]
d2 = x[1]['Date']
rt = (d2 - d1)
response_times.append(rt.total_seconds())
except AttributeError as e:
print(e)
except TypeError as e:
print(e)
except KeyError as e:
# print e -- suppress error
pass
len(response_times)
import matplotlib.pyplot as plt
%matplotlib inline
plt.semilogy(sorted(response_times,reverse=True))
import powerlaw
f = powerlaw.Fit(response_times)
print(f.power_law.alpha)
print(f.xmin)
print(f.D)
R, p = f.distribution_compare('power_law', 'lognormal')
print(R,p)
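# Added interpretation note (a sketch): in the powerlaw package, a positive
# log-likelihood ratio R favours the first distribution passed to
# distribution_compare, and p estimates the significance of that comparison.
print('power law favoured' if R > 0 else 'lognormal favoured', '(p = {:.3g})'.format(p))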
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will look at messages in our archive that are responses to other messages and how long after the original email the response was made.
|
13,921
|
<ASSISTANT_TASK:>
Python Code:
%config InlineBackend.figure_format = 'retina'
import numpy as np
import matplotlib.pyplot as plt
import uncertainties as uct
from uncertainties import unumpy as unp
import pandas as pd
import pytheos as eos
x = unp.uarray(np.linspace(0.01,15.,20), np.ones(20)*0.5) # 0.1,7.25
energy = eos.debye_E(x)
plt.plot(unp.nominal_values(x), unp.nominal_values(energy))
plt.xlabel('x'); plt.ylabel('Energy')
plt.errorbar(unp.nominal_values(x), unp.nominal_values(energy),
xerr = unp.std_devs(x), yerr = unp.std_devs(energy));
help(eos.constq_grun)
v0 = 162.3
v = np.linspace(v0, v0*0.8, 20)
grun = eos.constq_grun(v, v0, 1.5, 2)
plt.plot(v, grun)
plt.xlabel('Unit-cell volume ($\mathrm{\AA}^3$)'); plt.ylabel('$\gamma$');
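# Added cross-check (a sketch; assumes pytheos uses the standard constant-q form
# gamma = gamma0 * (V/V0)**q):
gamma_manual = 1.5 * (v / v0)**2
print(np.allclose(gamma_manual, grun))  # expected True under that assumption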
s_v = np.random.uniform(0., 0.1, 20)
v_u = unp.uarray(v, s_v)
gamma = eos.constq_grun(v_u, uct.ufloat(v0, 0.01),
uct.ufloat(1.5, 0.1), uct.ufloat(2.,0.5))
gamma
df = pd.DataFrame()
df['volume'] = v_u
df['gamma'] = gamma
df
# print(df.to_string(index=False))
plt.errorbar(unp.nominal_values(v_u),
unp.nominal_values(gamma), xerr=unp.std_devs(v_u),
yerr=unp.std_devs(gamma))
plt.xlabel('Unit-cell volume ($\mathrm{\AA}^3$)'); plt.ylabel('$\gamma$');
eos.constq_grun(v_u, v0, 1.5, 2.)
help(eos.constq_debyetemp)
eos.constq_debyetemp(v_u, v0, 1.5, 2., 1000.)
help(eos.constq_pth)
p_th = eos.constq_pth(v_u, unp.uarray(np.ones_like(v)*2000., np.ones_like(v)*100), v0, 1.5, 2., 1000., 5, 4)
p_th
plt.errorbar(unp.nominal_values(v_u), unp.nominal_values(p_th),
xerr=unp.std_devs(v_u), yerr=unp.std_devs(p_th))
plt.xlabel('Unit-cell volume ($\mathrm{\AA}^3$)'); plt.ylabel('Thermal pressure (GPa)');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 0. General note
Step2: 1. Calculate Debye energy with uncertainties
Step3: 2. Calculate Gruneisen parameter
Step4: Calculate Gruneisen parameter without error bar.
Step5: The cell below shows how to do error propagation.
Step6: If you need a pretty table.
Step7: You do not need to provide uncertainties for all the parameters. The cell below shows a case where we do not have error bars for the parameters. In this case, we have uncertainties for volume.
Step8: 3. Calculate Debye temperature and thermal pressure
Step9: You can get thermal pressures with error bars.
|
13,922
|
<ASSISTANT_TASK:>
Python Code:
import PaSDqc
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pathlib
%matplotlib inline
%load_ext autoreload
%autoreload 2
sns.set_context('poster')
sns.set_style("ticks", {'ytick.minor.size': 0.0, 'xtick.minor.size': 0.0})
chr1_MN1a = PaSDqc.PSDTools.RegionPSD.analyze("../data/Zhang_2015/cov/", build='grch37', l_region=1e7, l_seg=5e5, min_freq=5e-4)
chr1_MN1a.KL_div()
chr1_MN1a.kl['chr1']
# Load the raw read depth
df_MN1a = pd.read_table("../data/Zhang_2015/cov/MN1a.chr1.map.pos.cov", names=['chr', 'pos', 'depth'])
chr1_1465 = PaSDqc.PSDTools.RegionPSD.analyze("../data/Lodato_2015/1465/cov/", build='grch37', l_region=1e7, l_seg=5e5, min_freq=5e-4)
chr1_1465.KL_div()
chr1_1465.kl['chr1']
# Load the raw read depth
df_1465= pd.read_table("../data/Lodato_2015/1465/cov/1465_MDA_30.chr1.map.pos.cov", names=['chr', 'pos', 'depth'])
f = plt.figure(figsize=(15, 10))
ax0 = f.add_subplot(221)
ax1 = f.add_subplot(223)
ax2 = f.add_subplot(222, sharex=ax0, sharey=ax0)
ax3 = f.add_subplot(224, sharex=ax1, sharey=ax1)
cp = sns.color_palette()
ax0.plot(chr1_MN1a.kl['chr1'][:10], 'o')
ax0.plot(chr1_MN1a.kl['chr1'][14:], 'o', color=cp[0])
ax0.plot(chr1_MN1a.kl['chr1'].index[10], chr1_MN1a.kl['chr1'][10], 'o', color=cp[3])
ax0.plot(chr1_MN1a.kl['chr1'].index[11], chr1_MN1a.kl['chr1'][11], 'o', color=cp[2])
ax0.plot(chr1_MN1a.kl['chr1'].index[13], chr1_MN1a.kl['chr1'][13], 'o', color=cp[1])
ax0.plot(chr1_MN1a.kl['chr1'].index[14], chr1_MN1a.kl['chr1'][14], 'o', color=cp[3])
ax0.set_ylabel('KL Divergence')
ax0.set_xlabel('position')
ax1.plot(df_MN1a.pos[(df_MN1a.pos >= 100017322) & (df_MN1a.pos <= 110017322)], df_MN1a.depth[(df_MN1a.pos >= 100017322) & (df_MN1a.pos <= 110017322)], color=cp[3])
ax1.plot(df_MN1a.pos[(df_MN1a.pos >= 110017322) & (df_MN1a.pos <= 121485368)], df_MN1a.depth[(df_MN1a.pos >= 110017322) & (df_MN1a.pos <= 121485368)], color=cp[2])
ax1.plot(df_MN1a.pos[(df_MN1a.pos >= 142535448) & (df_MN1a.pos <= 152535448)], df_MN1a.depth[(df_MN1a.pos >= 142535448) & (df_MN1a.pos <= 152535448)], color=cp[1])
ax1.plot(df_MN1a.pos[(df_MN1a.pos >= 152535448) & (df_MN1a.pos <= 162535448)], df_MN1a.depth[(df_MN1a.pos >= 152535448) & (df_MN1a.pos <= 162535448)], color=cp[3])
ax1.set_ylabel('Depth')
ax1.set_xlabel('position')
ax2.plot(chr1_1465.kl['chr1'][:10], 'o')
ax2.plot(chr1_1465.kl['chr1'][14:], 'o', color=cp[0])
ax2.plot(chr1_1465.kl['chr1'].index[10], chr1_1465.kl['chr1'][10], 'o', color=cp[3])
ax2.plot(chr1_1465.kl['chr1'].index[11], chr1_1465.kl['chr1'][11], 'o', color=cp[2])
ax2.plot(chr1_1465.kl['chr1'].index[13], chr1_1465.kl['chr1'][13], 'o', color=cp[1])
ax2.plot(chr1_1465.kl['chr1'].index[14], chr1_1465.kl['chr1'][14], 'o', color=cp[3])
# ax2.set_ylabel('KL Divergence')
ax2.set_xlabel('position')
ax3.plot(df_1465.pos[(df_1465.pos >= 100017322) & (df_1465.pos <= 110017322)], df_1465.depth[(df_1465.pos >= 100017322) & (df_1465.pos <= 110017322)], color=cp[3])
ax3.plot(df_1465.pos[(df_1465.pos >= 110017322) & (df_1465.pos <= 121485368)], df_1465.depth[(df_1465.pos >= 110017322) & (df_1465.pos <= 121485368)], color=cp[2])
ax3.plot(df_1465.pos[(df_1465.pos >= 142535448) & (df_1465.pos <= 152535448)], df_1465.depth[(df_1465.pos >= 142535448) & (df_1465.pos <= 152535448)], color=cp[1])
ax3.plot(df_1465.pos[(df_1465.pos >= 152535448) & (df_1465.pos <= 162535448)], df_1465.depth[(df_1465.pos >= 152535448) & (df_1465.pos <= 162535448)], color=cp[3])
# ax3.set_ylabel('Depth')
ax3.set_xlabel('position')
ax0.set_title('MN1a Chr1')
ax2.set_title('1465 Cell 30 Chr1')
f.text(0.01, 0.97, "A", weight="bold", horizontalalignment='left', verticalalignment='center', fontsize=20)
f.text(0.51, 0.97, "B", weight="bold", horizontalalignment='left', verticalalignment='center', fontsize=20)
#f.text(0.49, 0.66, "C", weight="bold", horizontalalignment='left', verticalalignment='center', fontsize=20)
f.text(0.01, 0.49, "C", weight="bold", horizontalalignment='left', verticalalignment='center', fontsize=20)
f.text(0.51, 0.49, "D", weight="bold", horizontalalignment='left', verticalalignment='center', fontsize=20)
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Analyze MN1a from Zhang et al, 2015
Step2: Analyze 1465 MDA 30 from Lodato et al, 2015
Step3: Make the figure
|
13,923
|
<ASSISTANT_TASK:>
Python Code:
!pip install tqdm
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = '/Users/syednasar/sn/dev/workspace/mygit/data/cifar-10-batches-py'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('cifar-10-python.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
'cifar-10-python.tar.gz',
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open('cifar-10-python.tar.gz') as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 1
sample_id = 5
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
def normalize(x):
Normalize a list of sample image data in the range of 0 to 1
: x: List of image data. The image shape is (32, 32, 3)
: return: Numpy array of normalize data
    # TODO: Implement Function
a = 0
b = 1
grayscale_min = 0
grayscale_max = 255
return a + ( ( (x - grayscale_min)*(b - a) )/( grayscale_max - grayscale_min ) )
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_normalize(normalize)
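# Added quick check (a sketch): min-max scaling maps 8-bit pixel values onto [0, 1].
print(normalize(np.array([0., 127.5, 255.])))  # -> approximately [0., 0.5, 1.]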
from sklearn.preprocessing import LabelBinarizer
mapped = LabelBinarizer().fit_transform([x for x in range(10)])
def one_hot_encode(x):
One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
: x: List of sample Labels
: return: Numpy array of one-hot encoded labels
# TODO: Implement Function
encoded = np.array([mapped[i] for i in x])
#print(encoded[:2])
return encoded
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_one_hot_encode(one_hot_encode)
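# Added quick check (a sketch): each label index maps to a 10-dimensional indicator
# vector with a single 1 at that index.
print(one_hot_encode([0, 3]))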
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open(cifar10_dataset_folder_path + '/preprocess_validation.p', mode='rb'))
import tensorflow as tf
#Ref: https://classroom.udacity.com/nanodegrees/nd101/parts/2a9dba0b-28eb-4b0e-acfa-bdcf35680d90/modules/ca299d5f-35f6-4520-a2b0-74c6878008b3/lessons/2fd24529-215c-47b5-a644-2c23650493f6/concepts/d0db3cab-ad70-46ec-9614-4dfc27dfc865
def neural_net_image_input(image_shape):
Return a Tensor for a bach of image input
: image_shape: Shape of the images
: return: Tensor for image input.
# TODO: Implement Function
#print(image_shape)
a,b,c= image_shape
x = tf.placeholder(tf.float32, (None, a, b, c),name='x')
return x
def neural_net_label_input(n_classes):
Return a Tensor for a batch of label input
: n_classes: Number of classes
: return: Tensor for label input.
# TODO: Implement Function
x = tf.placeholder(tf.float32, (None, n_classes),name='y')
return x
def neural_net_keep_prob_input():
Return a Tensor for keep probability
: return: Tensor for keep probability.
# TODO: Implement Function
x = tf.placeholder(tf.float32, None,name='keep_prob')
return x
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
Apply convolution then max pooling to x_tensor
:param x_tensor: TensorFlow Tensor
:param conv_num_outputs: Number of outputs for the convolutional layer
:param conv_ksize: kernal size 2-D Tuple for the convolutional layer
:param conv_strides: Stride 2-D Tuple for convolution
:param pool_ksize: kernal size 2-D Tuple for pool
:param pool_strides: Stride 2-D Tuple for pool
: return: A tensor that represents convolution and max pooling of x_tensor
# TODO: Implement Function
channels = int(x_tensor.get_shape()[3])
weight = tf.Variable(tf.truncated_normal((conv_ksize[0], conv_ksize[1], channels, conv_num_outputs)))
bias = tf.Variable(tf.zeros(conv_num_outputs))
conv_layer = tf.nn.conv2d(x_tensor, weight, strides=[1,*conv_strides,1], padding='SAME')
conv_layer = tf.nn.bias_add(conv_layer, bias)
conv_layer = tf.nn.relu(conv_layer)
# Applying maxpool
conv_layer = tf.nn.max_pool(conv_layer, ksize=[1,pool_ksize[0],pool_ksize[1],1], strides=[1,pool_strides[0],pool_strides[1],1],padding='SAME')
#print(conv_layer.shape)
return conv_layer
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_con_pool(conv2d_maxpool)
def flatten(x_tensor):
Flatten x_tensor to (Batch Size, Flattened Image Size)
: x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
: return: A tensor of size (Batch Size, Flattened Image Size).
# TODO: Implement Function
tensor_dims = x_tensor.get_shape().as_list()
new_tensor = tf.reshape(x_tensor, [-1, tensor_dims[1]*tensor_dims[2]*tensor_dims[3]])
return new_tensor
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_flatten(flatten)
def fully_conn(x_tensor, num_outputs):
Apply a fully connected layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
# TODO: Implement Function
tensor_dims = x_tensor.get_shape().as_list()
fc_weights = tf.Variable(tf.truncated_normal([tensor_dims[1], num_outputs], mean=0.0, stddev=0.01 ))
fc_bias = tf.Variable(tf.zeros([num_outputs]))
fc_output = tf.add(tf.matmul(x_tensor, fc_weights), fc_bias)
fc_output = tf.nn.relu(fc_output)
return fc_output
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_fully_conn(fully_conn)
def output(x_tensor, num_outputs):
Apply a output layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
# TODO: Implement Function
tensor_dims = x_tensor.get_shape().as_list()
op_weights = tf.Variable(tf.truncated_normal([tensor_dims[1], num_outputs]))
op_bias = tf.Variable(tf.truncated_normal([num_outputs]))
op_output = tf.add(tf.matmul(x_tensor, op_weights), op_bias)
return op_output
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_output(output)
def conv_net_OLD(x, keep_prob):
Create a convolutional neural network model
: x: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that hold dropout keep probability.
: return: Tensor that represents logits
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
# conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
# TODO: Apply a Flatten Layer
# Function Definition from Above:
# flatten(x_tensor)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
# fully_conn(x_tensor, num_outputs)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
# output(x_tensor, num_outputs)
# TODO: return output
conv_num_outputs = 10
conv_ksize = [3, 3]
conv_strides = (1, 1)
pool_ksize = (3, 3)
pool_strides = (1, 1)
num_outputs = 1
layer1 = conv2d_maxpool(x, conv_num_outputs, conv_ksize, conv_strides,
pool_ksize, pool_strides)
layer2 = tf.nn.dropout(layer1, keep_prob)
# TODO: Apply a Flatten Layer
# Function Definition from Above:
layer3 = flatten(layer2)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
layer4 = fully_conn(layer3, num_outputs)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
# output(x_tensor, num_outputs)
n_classes = y.get_shape().as_list()[1]
    out = output(layer4, n_classes)
# TODO: return output
return out
def conv_net(x, keep_prob):
Create a convolutional neural network model
: x: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that hold dropout keep probability.
: return: Tensor that represents logits
num_outputs = 10
conv_num_outputs = 300
conv_ksize = (2,2)
pool_ksize = (4,4)
conv_strides = (1,1)
pool_strides = (1,1)
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
x_tensor = conv2d_maxpool(x, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
x_tensor = conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
# TODO: Apply a Flatten Layer
# Function Definition from Above:
fc1 = flatten(x_tensor)
#fc1 = tf.nn.dropout(fc1, keep_prob)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
fc2 = fully_conn(fc1, 100)
fc2 = fully_conn(fc2, 50)
fc2 = tf.nn.dropout(fc2, keep_prob)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
fc3 = output(fc2, num_outputs)
# TODO: return output
return fc3
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that is can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
Optimize the session on a batch of images and labels
: session: Current TensorFlow session
: optimizer: TensorFlow optimizer function
: keep_probability: keep probability
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
# TODO: Implement Function
result = session.run(optimizer, feed_dict={
x: feature_batch,
y: label_batch,
keep_prob: keep_probability})
return result
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_train_nn(train_neural_network)
def print_stats(session, feature_batch, label_batch, cost, accuracy):
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
# TODO: Implement Function
    # use the global placeholders x, y and keep_prob defined above; dropout is disabled (1.0) for evaluation
    current_cost = session.run(
        cost,
        feed_dict={x: feature_batch, y: label_batch, keep_prob: 1.0})
    valid_accuracy = session.run(
        accuracy,
        feed_dict={x: valid_features, y: valid_labels, keep_prob: 1.0})
    print('Cost: {:<8.3} Valid Accuracy: {:<5.3}'.format(
        current_cost,
        valid_accuracy))
# TODO: Tune Parameters
epochs = 10
batch_size = 64
keep_probability = 0.75
DON'T MODIFY ANYTHING IN THIS CELL
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
DON'T MODIFY ANYTHING IN THIS CELL
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
DON'T MODIFY ANYTHING IN THIS CELL
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
Test the saved model against the test dataset
test_features, test_labels = pickle.load(open( cifar10_dataset_folder_path + '/preprocess_training.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for train_feature_batch, train_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: train_feature_batch, loaded_y: train_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Classification
Step2: Explore the Data
Step5: Implement Preprocess Functions
Step8: One-hot encode
Step10: Randomize Data
Step12: Check Point
Step17: Build the network
Step20: Convolution and Max Pooling Layer
Step23: Flatten Layer
Step26: Fully-Connected Layer
Step29: Output Layer
Step33: Create Convolutional Model
Step36: Train the Neural Network
Step38: Show Stats
Step39: Hyperparameters
Step41: Train on a Single CIFAR-10 Batch
Step43: Fully Train the Model
Step46: Checkpoint
|
13,924
|
<ASSISTANT_TASK:>
Python Code:
# Execute this cell to load the notebook's style sheet, then ignore it
from IPython.core.display import HTML
css_file = '../style/custom.css'
HTML(open(css_file, "r").read())
import numpy
numpy.array([3, 5, 8, 17])
numpy.ones(5)
numpy.zeros(3)
numpy.arange(4)
numpy.arange(2, 6)
numpy.arange(2, 6, 2)
numpy.arange(2, 6, 0.5)
numpy.linspace(2.0, 3.0)
len(numpy.linspace(2.0, 3.0))
numpy.linspace(2.0, 3.0, 6)
numpy.linspace(-1, 1, 9)
x_array = numpy.linspace(-1, 1, 9)
y_array = x_array**2
print(y_array)
z_array = numpy.sqrt(y_array)
print(z_array)
add_array = x_array + y_array
print(add_array)
mult_array = x_array * z_array
print(mult_array)
x_array / y_array
array_2d = numpy.array([[1, 2], [3, 4]])
print(array_2d)
X = numpy.array([[1, 2], [3, 4]])
Y = numpy.array([[1, -1], [0, 1]])
X + Y
X * Y
X @ Y
numpy.dot(X, Y)
a = numpy.arange(24)
a_3D = numpy.reshape(a, (2, 3, 4))
print(a_3D)
numpy.shape(a_3D)
X
# Grab the element in the 1st row and 1st column
X[0, 0]
# Grab the element in the 1st row and 2nd column
X[0, 1]
# Grab the 1st column
X[:, 0]
# Grab the 1st row
X[0, :]
a_3D
a_3D[:, :, 0]
a_3D[:, 0:2, 0]
a_3D[0, 1, 1:3]
#import random library
import random
lst_1 = random.sample(range(100), 100)
lst_2 = random.sample(range(100), 100)
#print first 10 elements
print(lst_1[0:10])
print(lst_2[0:10])
%%time
res_lst = []
for i in range(100):
res_lst.append(lst_1[i] + lst_2[i])
print(res_lst[0:10])
arr_1 = numpy.random.randint(0, 100, size=100)
arr_2 = numpy.random.randint(0, 100, size=100)
#print first 10 elements
print(arr_1[0:10])
print(arr_2[0:10])
%%time
arr_res = arr_1 + arr_2
xarray = numpy.linspace(0, 2, 41)
print(xarray)
pow2 = xarray**2
pow3 = xarray**3
pow_half = numpy.sqrt(xarray)
from matplotlib import pyplot
%matplotlib inline
#Plot x^2
pyplot.plot(xarray, pow2, color='k', linestyle='-', label='square')
#Plot x^3
pyplot.plot(xarray, pow3, color='k', linestyle='--', label='cube')
#Plot sqrt(x)
pyplot.plot(xarray, pow_half, color='k', linestyle=':', label='square root')
#Plot the legends in the best location
pyplot.legend(loc='best')
#Plot x^2
pyplot.plot(xarray, pow2, color='red', linestyle='-', label='$x^2$')
#Plot x^3
pyplot.plot(xarray, pow3, color='green', linestyle='-', label='$x^3$')
#Plot sqrt(x)
pyplot.plot(xarray, pow_half, color='blue', linestyle='-', label='$\sqrt{x}$')
#Plot the legends in the best location
pyplot.legend(loc='best');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Play with NumPy Arrays
Step2: Creating arrays
Step3: NumPy offers many ways to create arrays in addition to this. We already mentioned some of them above.
Step4: Another useful one
Step5: numpy.linspace() is similar to numpy.arange(), but uses number of samples instead of a step size. It returns an array with evenly spaced numbers over the specified interval.
Step6: Array operations
Step7: Now that we've saved it with a variable name, we can do some computations with the array. E.g., take the square of every element of the array, in one go
Step8: We can also take the square root of a positive array, using the numpy.sqrt() function
Step9: Now that we have different arrays x_array, y_array and z_array, we can do more computations, like add or multiply them. For example
Step10: Array addition is defined element-wise, like when adding two vectors (or matrices). Array multiplication is also element-wise
Step11: We can also divide arrays, but you have to be careful not to divide by zero. This operation will result in a nan which stands for Not a Number. Python will still perform the division, but will tell us about the problem.
Step12: Multidimensional arrays
Step13: 2D arrays can be added, subtracted, and multiplied
Step14: The addition of these two matrices works exactly as you would expect
Step15: What if we try to multiply arrays using the '*'operator?
Step16: The multiplication using the '*' operator is element-wise. If we want to do matrix multiplication we use the '@' operator
Step17: Or equivalently we can use numpy.dot()
Step18: 3D arrays
Step19: We can check for the shape of a NumPy array using the function numpy.shape()
Step20: Visualizing the dimensions of the a_3D array can be tricky, so here is a diagram that will help you to understand how the dimensions are assigned
Step21: Exercises
Step22: When we don't specify the start and/or end point in the slicing, the symbol ':' means "take everything along that axis"
Step23: Exercises
Step24: If we want to grab the first column of both matrices in our a_3D array, we do
Step25: The line above is telling NumPy that we want everything along the first two axes and only index 0 along the last axis
Step26: Below, from the first matrix in our a_3D array, we will grab the two middle elements (5,6)
Step27: Exercises
Step28: We need to write a for statement, appending the result of the element-wise sum into a new list we call result_lst.
Step29: Element-wise sum of NumPy arrays
Step30: Now we can use the %%time cell magic, again, to see how long it takes NumPy to compute the element-wise sum.
Step31: Notice that in the case of arrays, the code not only is more readable (just one line of code), but it is also faster than with lists. This time advantage will be larger with bigger arrays/lists.
Step32: To plot the resulting arrays as a function of the orginal one (xarray) in the x-axis, we need to import the module pyplot from Matplotlib.
Step33: The command %matplotlib inline is there to get our plots inside the notebook (instead of a pop-up window, which is the default behavior of pyplot).
Step34: To illustrate other features, we will plot the same data, but varying the colors instead of the line style. We'll also use LaTeX syntax to write formulas in the labels. If you want to know more about LaTeX syntax, there is a quick guide to LaTeX available online.
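The lines below are a small standalone sketch (not one of the notebook cells above) of two points from this list: numpy.arange takes a step size while numpy.linspace takes a sample count (Step 5), and '*' is element-wise while '@' / numpy.dot is the matrix product (Steps 16-17); the values are chosen purely for illustration.
import numpy
A = numpy.array([[1, 2], [3, 4]])
B = numpy.array([[1, -1], [0, 1]])
print(numpy.arange(2, 3, 0.25))    # step size 0.25 -> [2.   2.25  2.5   2.75]
print(numpy.linspace(2, 3, 5))     # 5 samples      -> [2.   2.25  2.5   2.75  3.  ]
print(A * B)                       # element-wise   -> [[ 1 -2] [ 0  4]]
print(A @ B)                       # matrix product -> [[1 1] [3 1]]
print(numpy.array_equal(A @ B, numpy.dot(A, B)))   # True: '@' and numpy.dot agree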
|
13,925
|
<ASSISTANT_TASK:>
Python Code:
import io, os, sys, types
from IPython import get_ipython
from nbformat import read
from IPython.core.interactiveshell import InteractiveShell
def find_notebook(fullname, path=None):
find a notebook, given its fully qualified name and an optional path
This turns "foo.bar" into "foo/bar.ipynb"
and tries turning "Foo_Bar" into "Foo Bar" if Foo_Bar
does not exist.
name = fullname.rsplit('.', 1)[-1]
if not path:
path = ['']
for d in path:
nb_path = os.path.join(d, name + ".ipynb")
if os.path.isfile(nb_path):
return nb_path
# let import Notebook_Name find "Notebook Name.ipynb"
nb_path = nb_path.replace("_", " ")
if os.path.isfile(nb_path):
return nb_path
class NotebookLoader(object):
Module Loader for Jupyter Notebooks
def __init__(self, path=None):
self.shell = InteractiveShell.instance()
self.path = path
def load_module(self, fullname):
import a notebook as a module
path = find_notebook(fullname, self.path)
print ("importing Jupyter notebook from %s" % path)
# load the notebook object
with io.open(path, 'r', encoding='utf-8') as f:
nb = read(f, 4)
# create the module and add it to sys.modules
# if name in sys.modules:
# return sys.modules[name]
mod = types.ModuleType(fullname)
mod.__file__ = path
mod.__loader__ = self
mod.__dict__['get_ipython'] = get_ipython
sys.modules[fullname] = mod
# extra work to ensure that magics that would affect the user_ns
# actually affect the notebook module's ns
save_user_ns = self.shell.user_ns
self.shell.user_ns = mod.__dict__
try:
for cell in nb.cells:
if cell.cell_type == 'code':
# transform the input to executable Python
code = self.shell.input_transformer_manager.transform_cell(cell.source)
# run the code in themodule
exec(code, mod.__dict__)
finally:
self.shell.user_ns = save_user_ns
return mod
class NotebookFinder(object):
Module finder that locates Jupyter Notebooks
def __init__(self):
self.loaders = {}
def find_module(self, fullname, path=None):
nb_path = find_notebook(fullname, path)
if not nb_path:
return
key = path
if path:
# lists aren't hashable
key = os.path.sep.join(path)
if key not in self.loaders:
self.loaders[key] = NotebookLoader(path)
return self.loaders[key]
sys.meta_path.append(NotebookFinder())
ls
from linear_algebra import sum_of_squares, dot
#ls nbpackage
#import nbpackage.mynotebook
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from IPython.display import display, HTML
formatter = HtmlFormatter()
lexer = PythonLexer()
# publish the CSS for pygments highlighting
display(HTML(
<style type='text/css'>
%s
</style>
% formatter.get_style_defs()
))
def show_notebook(fname):
display a short summary of the cells of a notebook
with io.open(fname, 'r', encoding='utf-8') as f:
nb = read(f, 4)
html = []
for cell in nb.cells:
html.append("<h4>%s cell</h4>" % cell.cell_type)
if cell.cell_type == 'code':
html.append(highlight(cell.source, lexer, formatter))
else:
html.append("<pre>%s</pre>" % cell.source)
display(HTML('\n'.join(html)))
show_notebook(os.path.join("nbpackage", "mynotebook.ipynb"))
from nbpackage import mynotebook
mynotebook.foo()
mynotebook.has_ip_syntax()
ls nbpackage/nbs
show_notebook(os.path.join("nbpackage", "nbs", "other.ipynb"))
from nbpackage.nbs import other
other.bar(5)
import shutil
from IPython.paths import get_ipython_package_dir
utils = os.path.join(get_ipython_package_dir(), 'utils')
shutil.copy(os.path.join("nbpackage", "mynotebook.ipynb"),
os.path.join(utils, "inside_ipython.ipynb")
)
from IPython.utils import inside_ipython
inside_ipython.whatsmyname()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Import hooks typically take the form of two objects: a Module Loader, which builds the module object, and a Module Finder, which decides when that loader should be used (a stripped-down sketch follows this list)
Step5: Notebook Loader
Step7: The Module Finder
Step8: Register the hook
Step9: After this point, my notebooks should be importable.
Step10: So I should be able to import nbpackage.mynotebook.
Step13: Aside
Step14: So my notebook has a heading cell and some code cells,
Step15: Hooray, it imported! Does it work?
Step16: Hooray again!
Step17: Notebooks in packages
Step18: Note that the __init__.py is necessary for nb to be considered a package,
Step19: So now we have importable notebooks, from both the local directory and inside packages.
Step20: and import the notebook from IPython.utils
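The lines below are a stripped-down sketch of the two-object pattern from Step 2, separate from the notebook machinery above: a finder and loader pair that serves a single made-up module name, hello_hook. It uses the same legacy find_module/load_module protocol as the code above; very recent Python versions (3.12+) drop that protocol in favour of the importlib.abc interfaces.
import sys
import types
class HelloLoader(object):
    # the Loader: builds and registers the module object on request
    def load_module(self, fullname):
        mod = types.ModuleType(fullname)
        mod.greet = lambda: "hello from a synthetic module"
        sys.modules[fullname] = mod
        return mod
class HelloFinder(object):
    # the Finder: decides which import names this hook is responsible for
    def find_module(self, fullname, path=None):
        if fullname == "hello_hook":
            return HelloLoader()
        return None  # anything else falls through to the normal import machinery
sys.meta_path.append(HelloFinder())
import hello_hook
print(hello_hook.greet())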
|
13,926
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
Image(filename='circuit.png')
# %matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from IPython.display import HTML, display
# For tables
def tableit(data):
display(HTML(
'<table><tr>{}</tr></table>'.format(
'</tr><tr>'.join(
'<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in data)
)
))
U0 = 17.1 # mains voltage in Volt (transformed from 230 V ~ to 17.1 V ~)
U_B = 5 # DC voltage from an external supply in Volts
R1 = 10000 # Ohm
R2 = 2400 # Ohm
R3 = 3000 # Ohm
f = 50 # Hz
fs = 44100 # Hz
duration = 0.06 # Duration of plots in seconds
t = np.arange(0, duration, 1 / fs)
I1 = (U0 * np.sin(2 * np.pi * f * t) * R2 + U_B * R1) / (R1 * (R2 + R3) + R2 * R3)
I2 = (U0 * np.sin(2 * np.pi * f * t) * R3 - U_B * (R1 + R3)) / (R1 * (R2 + R3) + R2 * R3)
I0 = I1 + I2
plt.plot(t * 1000, I0 * 1000, label="I0")
plt.plot(t * 1000, I1 * 1000, label="I1")
plt.plot(t * 1000, I2 * 1000, label="I2")
plt.title("Currents")
plt.ylabel("I / mA")
plt.xlabel("t / ms")
plt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.6))
plt.grid()
plt.show()
U_R1 = I0 * R1
U_R2 = I2 * R2
U_R3 = I1 * R3
signal = U_R3 # U_R3 == signal voltage
plt.plot(t * 1000, U_R1, label="U_R1")
plt.plot(t * 1000, U_R2, label="U_R2")
plt.plot(t * 1000, signal, label="signal")
plt.title("Voltages")
plt.ylabel("U / V")
plt.xlabel("t / ms")
plt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.6))
plt.grid()
plt.show()
signal_max = np.max(signal)
signal_min = np.min(signal)
signal_pp = signal_max - signal_min
offset = (signal_max + signal_min) / 2
tableit([["signal_max / V", "signal_min / V","signal_pp / V", "Offset / V"],
[np.around(signal_max, 2), np.around(signal_min, 2), np.around(signal_pp, 2), np.around(offset, 2)],
])
plt.plot(t * 1000, signal, label="Signal Voltage")
plt.title("Signal Voltage")
plt.ylabel("U / V")
plt.xlabel("t / ms")
plt.axhline(y=offset, color='r', linestyle='-', label='Offset')
plt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.6))
plt.ylim(-0.1, signal_max + 0.2)
plt.grid()
plt.show()
voltage_range = np.arange(U0 - 8, U0 + 8 ,0.1) # mains voltage fluctuations
I1_DC = (voltage_range * R2 + U_B * R1) / (R1 * (R3 + R2) + R3 * R2)
signal_DC = I1_DC * R3
plt.plot(voltage_range, signal_DC)
plt.title('Mains voltage fluctuations and the effect on the signal')
plt.xlabel('Mains Voltage / V')
plt.ylabel('Signal Voltage (DC) / V')
plt.grid()
plt.show()
Image(filename='schmitt.png')
Image(filename='schmitt_drawing.png')
R4 = 10000 # Ohm
R5 = 34800 # Ohm
R6 = 10000 # Ohm
R7 = 10000 # Ohm
U_aH = U_B
U_aL = 0
U_ref = U_B * R6 / (R6 + R7)
U_High = (R4 / (R4 + R5) * U_aL - U_ref) / (R4 / (R4 + R5) -1)
U_Low = (R4 / (R4 + R5) * U_aH - U_ref) / (R4 / (R4 + R5) -1)
tableit([["U_ref / V", "U_Low / V","U_High / V"],
[U_ref, np.around(U_Low, 2), np.around(U_High, 2)],
])
def hyst(x, th_lo, th_hi, initial = False):
hi = x >= th_hi
lo_or_hi = (x <= th_lo) | hi
ind = np.nonzero(lo_or_hi)[0]
if not ind.size: # prevent index error if ind is empty
return np.zeros_like(x, dtype=bool) | initial
cnt = np.cumsum(lo_or_hi) # from 0 to len(x)
return np.where(cnt, hi[ind[cnt-1]], initial)
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
ax.add_patch(patches.Rectangle((U_Low, 0), U_High - U_Low, U_B, fill=False))
ax.set_title('Hysteresis')
ax.set_xlim([0,U_Low + 2]);
ax.set_ylim([0, U_B + 1]);
ax.set_xlabel('Ue / V')
ax.set_ylabel('Ua / V')
ax.arrow(U_High, U_B / 2 , 0, 0, head_width=0.2, head_length=0.3, fc='k', ec='k')
ax.arrow(U_Low, U_B / 2 , 0, -0.01, head_width=0.2, head_length=0.3, fc='k', ec='k')
ax.arrow((U_High + U_Low) / 2, U_B - 0.03 , -0.001, 0, head_width=0.2, head_length=0.3, fc='k', ec='k')
ax.arrow((U_High + U_Low) / 2, 0 , 0.001, 0, head_width=0.2, head_length=0.3, fc='k', ec='k')
plt.grid()
plt.show()
h1 = hyst(signal, U_Low, U_High)
plt.plot(t * 1000, signal, label='Signal Voltage')
plt.plot(t * 1000, U_B * h1, label='U_a')
plt.axhline(y=U_Low, color='k', linestyle='-', label='U_Low')
plt.axhline(y=U_High, color='r', linestyle='-', label='U_High')
plt.axhline(y=U_ref, color='y', linestyle='-', label='U_ref')
plt.title('Schmitt Trigger Result')
plt.xlabel('t / ms')
plt.ylabel('U / V')
plt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.6))
plt.ylim([0, U_aH + 0.5])
plt.grid()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parameters
Step2: Currents
Step3: Voltages
Step4: Offset of signal voltage
Step5: Dependency of signal voltage on the mains voltage
Step6: Evaluation
Step7: Source
Step8: $$U_{High} = \frac{\frac{R_4}{R_4 + R_5} U_{a_L} - U_{ref}} {\frac{R_4}{R_4 + R_5} -1} $$ (a quick numeric check of this formula follows this list)
Step9: Turn-on Threshold
Step10: Turn-off Threshold
Step11: Hysteresis of a non-inverting Schmitt Trigger
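As a quick standalone check of the threshold formula from Step 8, re-using the resistor values from the code above (no new data is introduced):
R4, R5, R6, R7, U_B = 10000, 34800, 10000, 10000, 5.0
U_ref = U_B * R6 / (R6 + R7)           # 2.5 V
k = R4 / (R4 + R5)
U_High = (k * 0.0 - U_ref) / (k - 1)   # output low (U_aL = 0 V)  -> turn-on threshold
U_Low = (k * U_B - U_ref) / (k - 1)    # output high (U_aH = 5 V) -> turn-off threshold
print(round(U_High, 2), round(U_Low, 2), round(U_High - U_Low, 2))   # ~3.22, ~1.78, ~1.44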
|
13,927
|
<ASSISTANT_TASK:>
Python Code:
We begin by using an inbuilt iPython Magic function to display plots
within the window.
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib
print(matplotlib.__version__)
%matplotlib inline
import matplotlib.pyplot as chuck_norris
y = [1,2,3,4,5,4,3,2,1]
x = [2,4,6,8,10,12,10,8,6]
chuck_norris.plot(x, y, marker='D', linestyle='-.', color='m')
chuck_norris.plot([1,2,3,4,5,4,3,2,1], marker='^', linestyle='-', color='r')
chuck_norris.ylabel('Numbers')
#chuck_norris.show()
%matplotlib inline
import matplotlib.pyplot as plt
x = [1, 2, 3, 4, 5]
y = [1, 4, 9, 16, 25]
# We have two lists, or more in mathematical terms, arrays, x and y
plt.plot(x, y)
# Import libraries
import matplotlib.pyplot as plt
%matplotlib inline
# Prepare the data
x = [1, 2, 3, 4, 5]
y = [1, 4, 9, 16, 25]
# Plot the data
plt.plot(x,y, label='Sales')
# Add a legend
plt.legend()
# Add more information
plt.xlabel('Adwords Spending (ZIM $)')
plt.ylabel('Monthly Sales (Oranges)')
plt.title('Effect of Adwords Spending on Monthly Sales')
plt.rcParams["figure.figsize"] = (15,7)
# Plot the data
plt.plot(x, y, label='Sales')
# Add a legend
plt.legend()
# Add more information
plt.xlabel('Adwords Spending (ZIM $)')
plt.ylabel('Monthly Sales (Oranges)')
plt.title('Effect of Adwords Spending on Monthly Sales')
%matplotlib inline
import matplotlib.pyplot as plt
y = [1,4,9,16,25,36,49,64,81,100]
x1 = [5,10,15,20,25,30,35,40,45,47]
x2 = [1,1,2,3,5,8,13,21,34,53]
plt.rcParams["figure.figsize"] = (15,7)
plt.plot(y,x1, marker='+', linestyle='--', color='b',label='Blue Shift')
plt.plot(y,x2, marker='o', linestyle='-', color='r', label='Red Shift')
plt.xlabel('Days to Election')
plt.ylabel('Popularity')
plt.title('Candidate Popularity')
plt.legend(loc='lower right')
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (15,7)
# Declare Values
vals = [10, 5, 3, 5, 7,6]
xval = [1, 2, 3, 4, 5,6]
# Bar Plot
plt.bar(xval, vals)
plt.title('Sales per Executive')
plt.xlabel('ID Number')
plt.ylabel('Weekly Sales')
import numpy as np
import matplotlib.pyplot as plt
% matplotlib inline
plt.rcParams["figure.figsize"] = (15,7)
Y = []
for x in range(0,1000000):
Y.append(np.random.randn())
# Here 500 is the number of bins. Try playing around with 10, 100, 200 etc. and see how it affects the shape of the graph
plt.hist(Y, 500)
plt.title('Distribution of Random Numbers')
radius = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
# We import the math library.
# This can also be done as from math import pi
# Then instead of math.pi, we simply use pi
import math
import matplotlib.pyplot as plt
% matplotlib inline
plt.rcParams["figure.figsize"] = (15,7)
# How awesome is list comprehension!!
area = [round((r**2)*math.pi,2) for r in radius]
print(area)
plt.xlabel('Radius')
plt.ylabel('Area')
plt.title('Radius of Circle v Area')
plt.scatter(radius, area, color='g', s=30)
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams["figure.figsize"] = (15,7)
x = np.random.randn(1, 500)
y = np.random.randn(1,500)
plt.scatter(x, y, color='b', s=50) # s = size of the point
plt.xlabel('X axis')
plt.ylabel('Y axis')
plt.title('Scatter Plot')
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams["figure.figsize"] = (15,7)
fig = plt.figure()
# 121 = row,column,plot number
# Plot for Left Hand Side - 121 means
imgage1 = fig.add_subplot(121)
N=500
x = np.random.randn(N)
y = np.random.randn(N)
colors = np.random.rand(N)
size =(20 * np.random.rand(N))**2
plt.scatter(x, y, s=size, c=colors, alpha=0.4)
# Plot for Right Hand Side
imgage2 = fig.add_subplot(122)
N=1000
x1 = np.random.randn(N)
y1 = np.random.randn(N)
area= (5 * np.random.rand(N))**3
colors = ['magenta', 'blue', 'black', 'yellow',]
plt.scatter(x1, y1, s=area, c=colors, alpha=0.6)
imgage2.grid(True)
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (15,7)
y = [1,4,9,16,25,36,49,64,81,100]
x1 = [5,10,15,20,25,30,35,40,45,47]
x2 = [1,1,2,3,5,8,13,21,34,53]
fig = plt.figure()
fig.suptitle("Candidate Popularity", fontsize="x-large")
# 121 = row,column,plot number
# Plot for Left Hand Side - 121 means
imgage011 = fig.add_subplot(121)
plt.xlabel('Days to Election')
plt.plot(y,x1, marker='+', linestyle='--', color='b')
# Plot for Right Hand Side
imgage2 = fig.add_subplot(122)
plt.xlabel('Days to Election')
plt.plot(y,x2, marker='o', linestyle='-', color='r')
#imgage2.grid(True)
## Alternate Method
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (15,7)
fig = plt.figure()
fig.suptitle("Candidate Popularity", fontsize="x-large")
ax1 = fig.add_subplot(121)
ax1.plot(y, x1, 'r-')
ax1.set_title("Candidate 1")
ax2 = fig.add_subplot(122)
ax2.plot(y, x2, 'k-')
ax2.set_title("Candidate 2")
plt.tight_layout()
fig = plt.gcf()
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (15,7)
y = [1,4,9,16,25,36,49,64,81,100]
x1 = [5,10,15,20,25,30,35,40,45,47]
x2 = [1,1,2,3,5,8,13,21,34,53]
fig = plt.figure()
fig.suptitle("Candidate Popularity", fontsize="x-large")
# 121 = row,column,plot number
# Plot for Left Hand Side - 121 means
imgage011 = fig.add_subplot(121)
plt.xlabel('Days to Election')
plt.plot(y,x1, marker='+', linestyle='--', color='b')
# Plot for Right Hand Side
imgage2 = fig.add_subplot(122)
plt.xlabel('Days to Election')
plt.plot(y,x2, marker='o', linestyle='-', color='r')
#imgage2.grid(True)
# Save Figure
plt.savefig("images/pop.png")
# Save Transparent Figure
plt.savefig("images/pop2.png", transparent=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Table of Contents
Step2: import matplotlib.pyplot as plt is python convention. <br>
Step3: So as you see, the convention plt can save you from typing chuck_norris every single time.
Step4: Let's break down what's happening.
Step5: But this is too small. Let's specify the size of the plot. Note that you set it once at the very top, right after you import your libraries, or keep varying it every time you want to plot a graph.
Step6: More Parameters for Line Plots
Step7: Bar Plots
Step8: Histograms
Step9: Scatterplots
Step10: Another Scatterplot Example
Step11: Grids
Step12: Saving Plots
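A short standalone sketch of the conventions from Steps 2 and 5 (the data below is made up for illustration): import pyplot under the conventional name plt and set the default figure size once via rcParams so that every later figure picks it up.
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (15, 7)   # set once; applies to every figure created afterwards
plt.plot([1, 2, 3, 4], [1, 4, 9, 16], marker='o', linestyle='--', color='g', label='squares')
plt.legend(loc='best')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Figure size picked up from rcParams')
plt.show()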
|
13,928
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
subjects = pd.read_csv('./data/subjects.csv')
nodes = pd.read_csv('./data/nodes.csv')
merged = pd.merge(nodes, subjects, on="subjectID")
merged.head()
import matplotlib.pyplot as plt
import seaborn as sns
stats = nodes.columns.drop(["subjectID", "tractID", "nodeID"])
print(stats[0])
stat = merged[["nodeID", "subjectID", "tractID", stats[0]]]
tract_stat = stat[stat["tractID"] == stat["tractID"].values[0]]
tract_stat.head()
tract_p = tract_stat.pivot(index='nodeID', columns='subjectID', values=stats[0])
import numpy as np
sns.tsplot(tract_p.values.T, err_style="unit_traces", estimator=np.nanmean)
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
estimator = Pipeline([("impute", Imputer()), ("cluster", KMeans(n_clusters=2))])
clusters = estimator.fit(tract_p.values.T).steps[1][1]
labels = clusters.labels_
x, y = estimator.fit_transform(tract_p.values.T).T
plt.scatter(x, y, c=labels.astype(np.float))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Merging nodes and subjects
Step2: Visualizing the data
Step3: We focus on the calculated diffusion statistics that are included in the nodes table
Step4: And specifically on the very first one
Step5: Select a single tract
Step6: Analyzing data
Step7: We create a pipeline that imputes nan values (that sometimes occur in tract profiles), and clusters the results into two clusters (a synthetic-data sketch of the same pipeline follows this list)
Step8: We compute the clusters and transform the data into cluster distance space
Step9: We plot the results in the latent cluster space
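The lines below are a self-contained sketch of the same imputation-plus-clustering idea from Steps 7-9, run on made-up profiles rather than the AFQ data; Imputer is the older scikit-learn API used above, while newer releases provide sklearn.impute.SimpleImputer instead.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
rng = np.random.RandomState(0)
profiles = np.vstack([rng.normal(0.4, 0.05, (10, 20)),
                      rng.normal(0.6, 0.05, (10, 20))])
profiles[0, 3] = np.nan                          # a missing value for the imputer to fill
estimator = Pipeline([("impute", Imputer()), ("cluster", KMeans(n_clusters=2))])
distances = estimator.fit_transform(profiles)    # shape (20, 2): distance to each cluster centre
labels = estimator.named_steps["cluster"].labels_
print(distances.shape, labels)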
|
13,929
|
<ASSISTANT_TASK:>
Python Code:
%run "../Functions/2. Google form analysis.ipynb"
import mca
np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
pd.set_option('display.precision', 5)
pd.set_option('display.max_columns', 25)
data = pd.read_table('../../data/burgundies.csv',sep=',', skiprows=1, index_col=0, header=0)
X = data.drop('oak_type', axis=1)
j_sup = data.oak_type
i_sup = np.array([0, 1, 0, 1, 0, .5, .5, 1, 0, 1, 0, 0, 1, 0, .5, .5, 1, 0, .5, .5, 0, 1])
ncols = 10
X.shape, j_sup.shape, i_sup.shape
src_index = (['Expert 1'] * 7 + ['Expert 2'] * 9 + ['Expert 3'] * 6)
var_index = (['fruity'] * 2 + ['woody'] * 3 + ['coffee'] * 2 + ['fruity'] * 2
+ ['roasted'] * 2 + ['vanillin'] * 3 + ['woody'] * 2 + ['fruity'] * 2
+ ['butter'] * 2 + ['woody'] * 2)
yn = ['y','n']; rg = ['1', '2', '3']; val_index = yn + rg + yn*3 + rg + yn*4
col_index = pd.MultiIndex.from_arrays([src_index, var_index, val_index],
names=['source', 'variable', 'value'])
table1 = pd.DataFrame(data=X.values, index=X.index, columns=col_index)
table1.loc['W?'] = i_sup
table1['','Oak Type',''] = j_sup
table1
mca_ben = mca.MCA(X, ncols=ncols)
mca_ind = mca.MCA(X, ncols=ncols, benzecri=False)
print(mca.MCA.__doc__)
data = {'Iλ': pd.Series(mca_ind.L),
'τI': mca_ind.expl_var(greenacre=False, N=4),
'Zλ': pd.Series(mca_ben.L),
'τZ': mca_ben.expl_var(greenacre=False, N=4),
'cλ': pd.Series(mca_ben.L),
'τc': mca_ind.expl_var(greenacre=True, N=4)}
# 'Indicator Matrix', 'Benzecri Correction', 'Greenacre Correction'
columns = ['Iλ', 'τI', 'Zλ', 'τZ', 'cλ', 'τc']
table2 = pd.DataFrame(data=data, columns=columns).fillna(0)
table2.index += 1
table2.loc['Σ'] = table2.sum()
table2.index.name = 'Factor'
table2
mca_ind.inertia, mca_ind.L.sum(), mca_ben.inertia, mca_ben.L.sum()
data = np.array([mca_ben.L[:2],
mca_ben.expl_var(greenacre=True, N=2) * 100]).T
df = pd.DataFrame(data=data, columns=['cλ','%c'], index=range(1,3))
df
fs, cos, cont = 'Factor score','Squared cosines', 'Contributions x 1000'
table3 = pd.DataFrame(columns=X.index, index=pd.MultiIndex
.from_product([[fs, cos, cont], range(1, 3)]))
table3.loc[fs, :] = mca_ben.fs_r(N=2).T
table3.loc[cos, :] = mca_ben.cos_r(N=2).T
table3.loc[cont, :] = mca_ben.cont_r(N=2).T * 1000
table3.loc[fs, 'W?'] = mca_ben.fs_r_sup(pd.DataFrame([i_sup]), N=2)[0]
np.round(table3.astype(float), 2)
table4 = pd.DataFrame(columns=col_index, index=pd.MultiIndex
.from_product([[fs, cos, cont], range(1, 3)]))
table4.loc[fs, :] = mca_ben.fs_c(N=2).T
table4.loc[cos, :] = mca_ben.cos_c(N=2).T
table4.loc[cont,:] = mca_ben.cont_c(N=2).T * 1000
fs_c_sup = mca_ben.fs_c_sup(mca.dummy(pd.DataFrame(j_sup)), N=2)
table4.loc[fs, ('Oak', '', 1)] = fs_c_sup[0]
table4.loc[fs, ('Oak', '', 2)] = fs_c_sup[1]
np.round(table4.astype(float), 2)
%matplotlib inline
import matplotlib.pyplot as plt
points = table3.loc[fs].values
labels = table3.columns.values
plt.figure()
plt.margins(0.1)
plt.axhline(0, color='gray')
plt.axvline(0, color='gray')
plt.xlabel('Factor 1')
plt.ylabel('Factor 2')
plt.scatter(*points, s=120, marker='o', c='r', alpha=.5, linewidths=0)
for label, x, y in zip(labels, *points):
plt.annotate(label, xy=(x, y), xytext=(x + .03, y + .03))
plt.show()
noise = 0.05 * (np.random.rand(*table4.T[fs].shape) - 0.5)
fs_by_source = table4.T[fs].add(noise).groupby(level=['source'])
fig, ax = plt.subplots()
plt.margins(0.1)
plt.axhline(0, color='gray')
plt.axvline(0, color='gray')
plt.xlabel('Factor 1')
plt.ylabel('Factor 2')
ax.margins(0.1)
markers = '^', 's', 'o', 'o'
colors = 'r', 'g', 'b', 'y'
for fscore, marker, color in zip(fs_by_source, markers, colors):
label, points = fscore
ax.plot(*points.T.values, marker=marker, color=color, label=label, linestyle='', alpha=.5, mew=0, ms=12)
ax.legend(numpoints=1, loc=4)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: MCA
Step2: For input format, mca uses a pandas DataFrame holding the indicator matrix (a small made-up example follows this list)
Step3: Table 1
Step4: MCA
Step5: Table 2 (L, expl_var)
Step6: The inertia is simply the sum of the principle inertias
Step7: Table 3 (fs_r, cos_r, cont_r, fs_r_sup)
Step8: Factor scores, squared cosines, and contributions for the observations are computed by fs_r, cos_r and cont_r methods respectively, where r denotes rows (i.e. observations). Again, N limits the number of retained factors.
Step9: Table 4 (fs_c, cos_c, cont_c, fs_c_sup)
Step10: Figure 1
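A minimal sketch of the input format from Step 2 and the row methods from Step 8, on a tiny made-up table (the values are invented purely for illustration): mca.dummy turns a raw categorical DataFrame into the indicator matrix that mca.MCA expects.
import pandas as pd
import mca
raw = pd.DataFrame({'fruity': ['y', 'n', 'y', 'n'],
                    'woody':  ['1', '2', '3', '1']})
indicator = mca.dummy(raw)                       # indicator (dummy-coded) matrix
m = mca.MCA(indicator, ncols=2, benzecri=False)
print(m.fs_r(N=2))                               # row factor scores, as in Table 3
print(m.cos_r(N=2))                              # row squared cosines
print(m.cont_r(N=2))                             # row contributions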
|
13,930
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!sudo apt -y install libportaudio2
!pip install -q --use-deprecated=legacy-resolver tflite-model-maker
!pip install -q pycocotools
!pip install -q opencv-python-headless==4.1.2.30
!pip uninstall -y tensorflow && pip install -q tensorflow==2.8.0
import numpy as np
import os
from tflite_model_maker.config import QuantizationConfig
from tflite_model_maker.config import ExportFormat
from tflite_model_maker import model_spec
from tflite_model_maker import object_detector
import tensorflow as tf
assert tf.__version__.startswith('2')
tf.get_logger().setLevel('ERROR')
from absl import logging
logging.set_verbosity(logging.ERROR)
spec = model_spec.get('efficientdet_lite0')
train_data, validation_data, test_data = object_detector.DataLoader.from_csv('gs://cloud-ml-data/img/openimage/csv/salads_ml_use.csv')
model = object_detector.create(train_data, model_spec=spec, batch_size=8, train_whole_model=True, validation_data=validation_data)
model.evaluate(test_data)
model.export(export_dir='.')
model.evaluate_tflite('model.tflite', test_data)
#@title Load the trained TFLite model and define some visualization functions
import cv2
from PIL import Image
model_path = 'model.tflite'
# Load the labels into a list
classes = ['???'] * model.model_spec.config.num_classes
label_map = model.model_spec.config.label_map
for label_id, label_name in label_map.as_dict().items():
classes[label_id-1] = label_name
# Define a list of colors for visualization
COLORS = np.random.randint(0, 255, size=(len(classes), 3), dtype=np.uint8)
def preprocess_image(image_path, input_size):
Preprocess the input image to feed to the TFLite model
img = tf.io.read_file(image_path)
img = tf.io.decode_image(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.uint8)
original_image = img
resized_img = tf.image.resize(img, input_size)
resized_img = resized_img[tf.newaxis, :]
resized_img = tf.cast(resized_img, dtype=tf.uint8)
return resized_img, original_image
def detect_objects(interpreter, image, threshold):
Returns a list of detection results, each a dictionary of object info.
signature_fn = interpreter.get_signature_runner()
# Feed the input image to the model
output = signature_fn(images=image)
# Get all outputs from the model
count = int(np.squeeze(output['output_0']))
scores = np.squeeze(output['output_1'])
classes = np.squeeze(output['output_2'])
boxes = np.squeeze(output['output_3'])
results = []
for i in range(count):
if scores[i] >= threshold:
result = {
'bounding_box': boxes[i],
'class_id': classes[i],
'score': scores[i]
}
results.append(result)
return results
def run_odt_and_draw_results(image_path, interpreter, threshold=0.5):
Run object detection on the input image and draw the detection results
# Load the input shape required by the model
_, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']
# Load the input image and preprocess it
preprocessed_image, original_image = preprocess_image(
image_path,
(input_height, input_width)
)
# Run object detection on the input image
results = detect_objects(interpreter, preprocessed_image, threshold=threshold)
# Plot the detection results on the input image
original_image_np = original_image.numpy().astype(np.uint8)
for obj in results:
# Convert the object bounding box from relative coordinates to absolute
# coordinates based on the original image resolution
ymin, xmin, ymax, xmax = obj['bounding_box']
xmin = int(xmin * original_image_np.shape[1])
xmax = int(xmax * original_image_np.shape[1])
ymin = int(ymin * original_image_np.shape[0])
ymax = int(ymax * original_image_np.shape[0])
# Find the class index of the current object
class_id = int(obj['class_id'])
# Draw the bounding box and label on the image
color = [int(c) for c in COLORS[class_id]]
cv2.rectangle(original_image_np, (xmin, ymin), (xmax, ymax), color, 2)
# Make adjustments to make the label visible for all objects
y = ymin - 15 if ymin - 15 > 15 else ymin + 15
label = "{}: {:.0f}%".format(classes[class_id], obj['score'] * 100)
cv2.putText(original_image_np, label, (xmin, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
# Return the final image
original_uint8 = original_image_np.astype(np.uint8)
return original_uint8
#@title Run object detection and show the detection results
INPUT_IMAGE_URL = "https://storage.googleapis.com/cloud-ml-data/img/openimage/3/2520/3916261642_0a504acd60_o.jpg" #@param {type:"string"}
DETECTION_THRESHOLD = 0.3 #@param {type:"number"}
TEMP_FILE = '/tmp/image.png'
!wget -q -O $TEMP_FILE $INPUT_IMAGE_URL
im = Image.open(TEMP_FILE)
im.thumbnail((512, 512), Image.ANTIALIAS)
im.save(TEMP_FILE, 'PNG')
# Load the TFLite model
interpreter = tf.lite.Interpreter(model_path=model_path)
interpreter.allocate_tensors()
# Run inference and draw detection result on the local copy of the original file
detection_result_image = run_odt_and_draw_results(
TEMP_FILE,
interpreter,
threshold=DETECTION_THRESHOLD
)
# Show the detection result
Image.fromarray(detection_result_image)
! curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
! echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list
! sudo apt-get update
! sudo apt-get install edgetpu-compiler
NUMBER_OF_TPUS = 1#@param {type:"number"}
!edgetpu_compiler model.tflite --num_segments=$NUMBER_OF_TPUS
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Object Detection with TensorFlow Lite Model Maker
Step2: Import the required packages.
Step3: Prepare the dataset
Step4: Step 2. Load the dataset.
Step5: Step 3. Train the TensorFlow model with the training data.
Step6: Step 4. Evaluate the model with the test data.
Step7: Step 5. Export as a TensorFlow Lite model (an optional quantized-export sketch follows this list).
Step8: Step 6. Evaluate the TensorFlow Lite model.
Step12: You can download the TensorFlow Lite model file using the left sidebar of Colab. Right-click on the model.tflite file and choose Download to download it to your local computer.
Step13: (Optional) Compile For the Edge TPU
Step 1. Install the EdgeTPU Compiler
Step14: Step 2. Select number of Edge TPUs, Compile
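As an optional variation on the export in Step 5 (a sketch only, not one of the original steps): the export call also accepts a QuantizationConfig, which was imported at the top but not used above. The for_float16 helper and the tflite_filename/quantization_config arguments are taken from the standard tflite_model_maker API; verify them against the installed version before relying on this.
config = QuantizationConfig.for_float16()
model.export(export_dir='.', tflite_filename='model_fp16.tflite',
             quantization_config=config)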
|
13,931
|
<ASSISTANT_TASK:>
Python Code:
from hgvs.easy import (__version__, parser, hdp, vm)
from hgvs.exceptions import HGVSDataNotAvailableError
__version__
# hgvs_g = "NC_000010.11:g.94762693G>A" # GRCh38
hgvs_g = "NC_000010.10:g.96522450G>A" # GRCh37
hgvs_c = "NM_000769.4:c.-13G>A"
var_c = parser.parse(hgvs_c)
var_g = parser.parse(hgvs_g)
hdp.get_tx_mapping_options(var_c.ac)
# or, for a more complete example with many options:
hdp.get_tx_mapping_options("NM_001807.4")
alignments = hdp.get_tx_for_gene("CYP2C19")
alignments.sort(key=lambda a: (a["tx_ac"], a["alt_ac"], a["alt_aln_method"]))
alignments
start = var_g.posedit.pos.start.base
end = var_g.posedit.pos.end.base
alignments = hdp.get_alignments_for_region(var_g.ac, start, end)
alignments.sort(key=lambda a: (a["tx_ac"], a["alt_ac"], a["alt_aln_method"]))
alignments
try:
vm.c_to_g(var_c, "NC_000010.10")
except HGVSDataNotAvailableError as e:
print(f"Got {e!r}")
vm.c_to_g(var_c, "NC_000010.10", alt_aln_method="splign-manual")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Discovering available alignments
Step2: Alignments for a gene
Step3: Alignments for a genomic region (new method)
Step4: Alternate method for transcript-to-genome projections
|
13,932
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import scipy.stats as ss
import numpy.linalg as linalg
x1 = [1, 1, -1, -1]
x2 = [1, -1, 1, -1]
y = [1.2, 3.2, 4.1, 3.6]
x_mat = np.column_stack((np.ones(4), x1, x2))
x_mat
beta, *_ = linalg.lstsq(x_mat, y)
y_hat = x_mat @ beta
resids = (y - y_hat)
SSR = np.sum(resids**2)
se2_epsilon = SSR / (len(y) - len(beta))
se2_beta = se2_epsilon * linalg.inv(x_mat.transpose() @ x_mat)
print(np.sqrt(se2_beta), np.sqrt(se2_epsilon))
df = len(x) - len(beta)
print('df = ', df)
for i in range(len(beta)):
#get our T-value for the confidence interval
T = ss.t.ppf(0.975, df)
# Get the width of the confidence interval using our previously computed standard error
cwidth = T * np.sqrt(se2_beta[i,i])
# print the result, using 2 - i to match our numbering above
hypothesis_T = -abs(beta[i]) / np.sqrt(se2_beta[i,i])
p = 2 * ss.t.cdf(hypothesis_T, df + 1) # +1 because null hypothesis doesn't include coefficient
print(f'beta_{i} is {beta[i]:.2f} +/- {cwidth:.2f} with 95% confidence. p-value: {p:.2f} (T = {hypothesis_T:.2f})')
from statsmodels.formula.api import ols
x1 = [1, 1, -1, -1]
x2 = [1, -1, 1, -1]
y = [1.2, 3.2, 4.1, 3.6]
data = {'x1': x1, 'x2': x2, 'y': y}
model = ols('y ~ x1 + x2', data=data).fit()
model.summary()
sm.stats.anova_lm(model)
xw = [0, 1, 0, 0, 1, 0, 1, 1]
xs = [0, 0, 1, 0, 1, 1, 0, 1]
xm = [0, 0, 0, 1, 0, 1, 1, 1]
y = [0.4, 0.3, 0.3, 0.2, 4.6, 0.3, 0.2, 5.2, 0.3, 0.2, 0.4, 0.3, 5.0, 0.3, 0.3, 5.0]
# we do xw + xw because we have 2 replicates at each condition
data = {'xw': xw + xw, 'xs': xs + xs, 'xm': xm + xm, 'y': y}
model = ols('y~xw + xs + xm + xw * xs + xw * xm + xs * xm + xw * xm * xs', data=data).fit()
sm.stats.anova_lm(model, typ=2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We'll use multidimensional ordinary least squares with an intercept
Step2: We'll compute our coefficients and their standard error
Step3: Now we can compute p-values and confidence intervals (a statsmodels cross-check follows this list)
Step4: So we found that our intercept is likely necessary (p < 0.05), but the two factors do not have a significant effect. We also found that factor 1 is more important than factor 2 as judged from the p-value
Step5: Interpreting Statsmodels
Step6: The ANOVA test gives information about each factor. The df is the degrees of freedom used to model each factor, the sum_sq is the difference between the grand mean response and the mean response of the treatment, the mean_sq is the sum_sq divided by the degrees of freedom, the F is an F-test statistic (like a T statistic from a t-test), and the final column contains the p-value for the existence of each treatment.
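As a short cross-check of the hand-rolled intervals from Steps 2-3 (a sketch re-using the same x1/x2/y data as above): the statsmodels OLS results object exposes the fitted coefficients, 95% confidence intervals and p-values directly.
import numpy as np
import statsmodels.api as sm
x1 = [1, 1, -1, -1]
x2 = [1, -1, 1, -1]
y = [1.2, 3.2, 4.1, 3.6]
X = sm.add_constant(np.column_stack((x1, x2)))
fit = sm.OLS(y, X).fit()
print(fit.params)      # intercept, beta_1, beta_2
print(fit.conf_int())  # 95% confidence intervals, matching the t-based intervals computed by hand above
print(fit.pvalues)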
|
13,933
|
<ASSISTANT_TASK:>
Python Code:
iloczyn = set([1, 2, 3, 4, 5]) & set([3, 4])
suma = set([1, 2, 3,]) | set([4, 5])
roznica = set([1, 2, 3, 4, 5]) - set([4, 5])
print(iloczyn)
print(suma)
print(roznica)
a = [1, 2, 3, 4]
b = [2, 3]
zbior1 = set(a)
zbior2 = set(b)
iloczyn = zbior1 & zbior2
print(zbior2)
imiona = {
"andrzej": 10,
"bartek": 15
}
print(imiona)
print(imiona.keys())
print(imiona.values())
print( dict([("andrzej", 10), (10, 5)]))
imiona = dict(andrzej=10, bartek=5)
print(imiona)
print(imiona.items())
print(dict.fromkeys('abcd', []))
imiona = dict(andrzej=10, bartek=5)
print(imiona.get("andrzej2", 5))
print("andrzej" in imiona)
print(imiona.pop("andrzej"))
print(imiona.pop("andrzej2", "ala ma kota"))
imiona = dict(andrzej=10, bartek=5)
print(imiona.setdefault("andrzej"))
print(imiona.setdefault("klucz którego nie ma", "domyślna wartość"))
from collections import defaultdict
imiona = defaultdict(list)
imiona["andrzej"].append(5)
imiona
from collections import defaultdict
imiona = defaultdict(set)
imiona["andrzej"].add(5)
imiona
from collections import defaultdict
imiona = defaultdict(int)
imiona["andrzej"] += 1
imiona["andrzej2"] -= 5
imiona
from collections import defaultdict
imiona = defaultdict(lambda: 10)
imiona["andrzej"] += 1
print("bartek" in imiona)
print(10 in imiona)
imiona
from operator import itemgetter
dane = {"a": 1, "b": 2, "c": 4, "d": 5}
pobierz_wartosci = itemgetter("c", "d")
pobierz_wartosci(dane)
rows = [
{"a": 1, "b": 2, "c": 4, "d": 5},
{"a": 4, "b": 2, "c": 4, "d": 10},
{"a": 2, "b": 2, "c": 4, "d": 7}
]
sorted(rows, key=itemgetter("a", "d"))
from collections import OrderedDict
a = OrderedDict.fromkeys('abcde', [])
print(a)
print(a.keys())
a.move_to_end('b')
print(a)
a.move_to_end('b', last=False)
print(a)
a = OrderedDict.fromkeys('abcde', [])
print(a.popitem())
print(a.popitem())
print(a.popitem(last=False))
from collections import Counter
ct = Counter('aaaabbbcdaslfhsdlfdas')
print(ct)
print(ct.most_common(3))
print(ct.elements())
print(sorted(ct.elements()))
print(sum(Counter('aaaabbbcdaslfhsdlfdas').values()))
print(list(Counter('aaaabbbcdaslfhsdlfdas')))
c = Counter('aaaabbbcdaslfhsdlfdas')
c.clear()
print(c)
c = Counter('aaabbb')
c['e'] = -4
c['d'] = 0
print(c)
print(+c)
c = Counter('aaabbb')
print(c)
c.subtract(Counter('abb'))
print(c)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Dictionaries (Maps/Dictionaries)
Step2: OrderedDict
Step3: Counter
|
13,934
|
<ASSISTANT_TASK:>
Python Code:
# Import what we need; abbreviating the names like this seems to be the common convention in this community
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Creating a Series by passing a list of values, letting pandas create a default integer index:
# When a Series is created from a list, pandas generates a default integer index
s = pd.Series([1,3,5,np.nan,6,8])
s
# First, create a datetime index
dates = pd.date_range('20130101', periods=6)
dates
# Creating a DataFrame by passing a numpy array, with a datetime index and labeled columns:
# When creating a DataFrame from a numpy array, specify the index and column labels
df = pd.DataFrame(np.random.randn(6,4), index=dates, columns=list('ABCD'))
df
# Creating a DataFrame by passing a dict of objects that can be converted to series-like.
# When a dict is passed to the DataFrame constructor, each value is treated like a Series
df2 = pd.DataFrame({ 'A' : 1.,
'B' : pd.Timestamp('20130102'),
'C' : pd.Series(1,index=list(range(4)),dtype='float32'),
'D' : np.array([3] * 4,dtype='int32'),
'E' : pd.Categorical(["test","train","test","train"]),
'F' : 'foo' })
df2
# Having specific dtypes
# Each column has its own specific dtype
df2.dtypes
# If you’re using IPython, tab completion for column names (as well as public attributes) is automatically enabled.
# Here’s a subset of the attributes that will be completed:
# In IPython, typing df2.<TAB> automatically offers the column names for completion
# In a Jupyter notebook, pressing TAB shows the completion candidates in a list box
# See the top & bottom rows of the frame
# View the first and last rows of the DataFrame
df.head()
df.tail(3)
# Display the index, columns, and the underlying numpy data
# View the index, columns, and underlying values
print(df.index)
print(df.columns)
print(df.values)
# Describe shows a quick statistic summary of your data
# データの特徴(平均、中央値、最大、最小など)を表示する
df.describe()
# Transposing your data
# Transpose the data (swap the index and columns)
df.T
# Sorting by an axis
# Sort by index labels (axis=1, i.e. along the columns)
df.sort_index(axis=1, ascending=False)
# Sorting by values
# Sort by values (here column B is specified)
df.sort_values(by='B')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Object creation
Step2: Viewing Data
|
13,935
|
<ASSISTANT_TASK:>
Python Code:
!pip install -q -U "tensorflow-text==2.8.*"
!pip install -q tf-models-official==2.4.0
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
from official.modeling import tf_utils
from official import nlp
from official.nlp import bert
# Load the required submodules
import official.nlp.optimization
import official.nlp.bert.bert_models
import official.nlp.bert.configs
import official.nlp.bert.run_classifier
import official.nlp.bert.tokenization
import official.nlp.data.classifier_data_lib
import official.nlp.modeling.losses
import official.nlp.modeling.models
import official.nlp.modeling.networks
gs_folder_bert = "gs://cloud-tpu-checkpoints/bert/v3/uncased_L-12_H-768_A-12"
tf.io.gfile.listdir(gs_folder_bert)
hub_url_bert = "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3"
glue, info = tfds.load('glue/mrpc', with_info=True,
# It's small, load the whole dataset
batch_size=-1)
list(glue.keys())
info.features
info.features['label'].names
glue_train = glue['train']
for key, value in glue_train.items():
print(f"{key:9s}: {value[0].numpy()}")
# TODO 1
# Set up tokenizer to generate Tensorflow dataset
tokenizer = bert.tokenization.FullTokenizer(
vocab_file=os.path.join(gs_folder_bert, "vocab.txt"),
do_lower_case=True)
print("Vocab size:", len(tokenizer.vocab))
tokens = tokenizer.tokenize("Hello TensorFlow!")
print(tokens)
ids = tokenizer.convert_tokens_to_ids(tokens)
print(ids)
tokenizer.convert_tokens_to_ids(['[CLS]', '[SEP]'])
def encode_sentence(s):
tokens = list(tokenizer.tokenize(s.numpy()))
tokens.append('[SEP]')
return tokenizer.convert_tokens_to_ids(tokens)
sentence1 = tf.ragged.constant([
encode_sentence(s) for s in glue_train["sentence1"]])
sentence2 = tf.ragged.constant([
encode_sentence(s) for s in glue_train["sentence2"]])
print("Sentence1 shape:", sentence1.shape.as_list())
print("Sentence2 shape:", sentence2.shape.as_list())
cls = [tokenizer.convert_tokens_to_ids(['[CLS]'])]*sentence1.shape[0]
input_word_ids = tf.concat([cls, sentence1, sentence2], axis=-1)
_ = plt.pcolormesh(input_word_ids.to_tensor())
input_mask = tf.ones_like(input_word_ids).to_tensor()
plt.pcolormesh(input_mask)
type_cls = tf.zeros_like(cls)
type_s1 = tf.zeros_like(sentence1)
type_s2 = tf.ones_like(sentence2)
input_type_ids = tf.concat([type_cls, type_s1, type_s2], axis=-1).to_tensor()
plt.pcolormesh(input_type_ids)
def encode_sentence(s, tokenizer):
tokens = list(tokenizer.tokenize(s))
tokens.append('[SEP]')
return tokenizer.convert_tokens_to_ids(tokens)
def bert_encode(glue_dict, tokenizer):
num_examples = len(glue_dict["sentence1"])
sentence1 = tf.ragged.constant([
encode_sentence(s, tokenizer)
for s in np.array(glue_dict["sentence1"])])
sentence2 = tf.ragged.constant([
encode_sentence(s, tokenizer)
for s in np.array(glue_dict["sentence2"])])
cls = [tokenizer.convert_tokens_to_ids(['[CLS]'])]*sentence1.shape[0]
input_word_ids = tf.concat([cls, sentence1, sentence2], axis=-1)
input_mask = tf.ones_like(input_word_ids).to_tensor()
type_cls = tf.zeros_like(cls)
type_s1 = tf.zeros_like(sentence1)
type_s2 = tf.ones_like(sentence2)
input_type_ids = tf.concat(
[type_cls, type_s1, type_s2], axis=-1).to_tensor()
inputs = {
'input_word_ids': input_word_ids.to_tensor(),
'input_mask': input_mask,
'input_type_ids': input_type_ids}
return inputs
glue_train = bert_encode(glue['train'], tokenizer)
glue_train_labels = glue['train']['label']
glue_validation = bert_encode(glue['validation'], tokenizer)
glue_validation_labels = glue['validation']['label']
glue_test = bert_encode(glue['test'], tokenizer)
glue_test_labels = glue['test']['label']
# TODO 2
# Print the key value and shapes
for key, value in glue_train.items():
print(f'{key:15s} shape: {value.shape}')
print(f'glue_train_labels shape: {glue_train_labels.shape}')
import json
bert_config_file = os.path.join(gs_folder_bert, "bert_config.json")
config_dict = json.loads(tf.io.gfile.GFile(bert_config_file).read())
bert_config = bert.configs.BertConfig.from_dict(config_dict)
config_dict
bert_classifier, bert_encoder = bert.bert_models.classifier_model(
bert_config, num_labels=2)
tf.keras.utils.plot_model(bert_classifier, show_shapes=True, dpi=48)
glue_batch = {key: val[:10] for key, val in glue_train.items()}
bert_classifier(
glue_batch, training=True
).numpy()
tf.keras.utils.plot_model(bert_encoder, show_shapes=True, dpi=48)
checkpoint = tf.train.Checkpoint(encoder=bert_encoder)
checkpoint.read(
os.path.join(gs_folder_bert, 'bert_model.ckpt')).assert_consumed()
# TODO 3
# Set up epochs and steps
epochs = 3
batch_size = 32
eval_batch_size = 32
train_data_size = len(glue_train_labels)
steps_per_epoch = int(train_data_size / batch_size)
num_train_steps = steps_per_epoch * epochs
warmup_steps = int(epochs * train_data_size * 0.1 / batch_size)
# creates an optimizer with learning rate schedule
optimizer = nlp.optimization.create_optimizer(
2e-5, num_train_steps=num_train_steps, num_warmup_steps=warmup_steps)
type(optimizer)
# TODO 4
metrics = [tf.keras.metrics.SparseCategoricalAccuracy('accuracy', dtype=tf.float32)]
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
bert_classifier.compile(
optimizer=optimizer,
loss=loss,
metrics=metrics)
# Train the model
bert_classifier.fit(
glue_train, glue_train_labels,
validation_data=(glue_validation, glue_validation_labels),
batch_size=32,
epochs=epochs)
my_examples = bert_encode(
glue_dict = {
'sentence1':[
'The rain in Spain falls mainly on the plain.',
'Look I fine tuned BERT.'],
'sentence2':[
'It mostly rains on the flat lands of Spain.',
'Is it working? This does not match.']
},
tokenizer=tokenizer)
result = bert_classifier(my_examples, training=False)
result = tf.argmax(result).numpy()
result
np.array(info.features['label'].names)[result]
export_dir='./saved_model'
tf.saved_model.save(bert_classifier, export_dir=export_dir)
reloaded = tf.saved_model.load(export_dir)
reloaded_result = reloaded([my_examples['input_word_ids'],
my_examples['input_mask'],
my_examples['input_type_ids']], training=False)
original_result = bert_classifier(my_examples, training=False)
# The results are (nearly) identical:
print(original_result.numpy())
print()
print(reloaded_result.numpy())
processor = nlp.data.classifier_data_lib.TfdsProcessor(
tfds_params="dataset=glue/mrpc,text_key=sentence1,text_b_key=sentence2",
process_text_fn=bert.tokenization.convert_to_unicode)
# TODO 5
# Set up output of training and evaluation Tensorflow dataset
train_data_output_path="./mrpc_train.tf_record"
eval_data_output_path="./mrpc_eval.tf_record"
max_seq_length = 128
batch_size = 32
eval_batch_size = 32
# Generate and save training data into a tf record file
input_meta_data = (
nlp.data.classifier_data_lib.generate_tf_record_from_data_file(
processor=processor,
data_dir=None, # It is `None` because data is from tfds, not local dir.
tokenizer=tokenizer,
train_data_output_path=train_data_output_path,
eval_data_output_path=eval_data_output_path,
max_seq_length=max_seq_length))
training_dataset = bert.run_classifier.get_dataset_fn(
train_data_output_path,
max_seq_length,
batch_size,
is_training=True)()
evaluation_dataset = bert.run_classifier.get_dataset_fn(
eval_data_output_path,
max_seq_length,
eval_batch_size,
is_training=False)()
training_dataset.element_spec
def create_classifier_dataset(file_path, seq_length, batch_size, is_training):
  """Creates input dataset from (tf)records files for train/eval."""
dataset = tf.data.TFRecordDataset(file_path)
if is_training:
dataset = dataset.shuffle(100)
dataset = dataset.repeat()
def decode_record(record):
name_to_features = {
'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64),
'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64),
'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64),
'label_ids': tf.io.FixedLenFeature([], tf.int64),
}
return tf.io.parse_single_example(record, name_to_features)
def _select_data_from_record(record):
x = {
'input_word_ids': record['input_ids'],
'input_mask': record['input_mask'],
'input_type_ids': record['segment_ids']
}
y = record['label_ids']
return (x, y)
dataset = dataset.map(decode_record,
num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.map(
_select_data_from_record,
num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=is_training)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
return dataset
# Set up batch sizes
batch_size = 32
eval_batch_size = 32
# Return Tensorflow dataset
training_dataset = create_classifier_dataset(
train_data_output_path,
input_meta_data['max_seq_length'],
batch_size,
is_training=True)
evaluation_dataset = create_classifier_dataset(
eval_data_output_path,
input_meta_data['max_seq_length'],
eval_batch_size,
is_training=False)
training_dataset.element_spec
# Note: 350MB download.
import tensorflow_hub as hub
hub_model_name = "bert_en_uncased_L-12_H-768_A-12"
hub_encoder = hub.KerasLayer(f"https://tfhub.dev/tensorflow/{hub_model_name}/3",
trainable=True)
print(f"The Hub encoder has {len(hub_encoder.trainable_variables)} trainable variables")
result = hub_encoder(
inputs=dict(
input_word_ids=glue_train['input_word_ids'][:10],
input_mask=glue_train['input_mask'][:10],
input_type_ids=glue_train['input_type_ids'][:10],),
training=False,
)
print("Pooled output shape:", result['pooled_output'].shape)
print("Sequence output shape:", result['sequence_output'].shape)
hub_classifier = nlp.modeling.models.BertClassifier(
bert_encoder,
num_classes=2,
dropout_rate=0.1,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02))
tf.keras.utils.plot_model(hub_classifier, show_shapes=True, dpi=64)
bert_encoder_config = config_dict.copy()
# You need to rename a few fields to make this work:
bert_encoder_config['attention_dropout_rate'] = bert_encoder_config.pop('attention_probs_dropout_prob')
bert_encoder_config['activation'] = tf_utils.get_activation(bert_encoder_config.pop('hidden_act'))
bert_encoder_config['dropout_rate'] = bert_encoder_config.pop('hidden_dropout_prob')
bert_encoder_config['initializer'] = tf.keras.initializers.TruncatedNormal(
stddev=bert_encoder_config.pop('initializer_range'))
bert_encoder_config['max_sequence_length'] = bert_encoder_config.pop('max_position_embeddings')
bert_encoder_config['num_layers'] = bert_encoder_config.pop('num_hidden_layers')
bert_encoder_config
manual_encoder = nlp.modeling.networks.BertEncoder(**bert_encoder_config)
checkpoint = tf.train.Checkpoint(encoder=manual_encoder)
checkpoint.read(
os.path.join(gs_folder_bert, 'bert_model.ckpt')).assert_consumed()
result = manual_encoder(my_examples, training=True)
print("Sequence output shape:", result[0].shape)
print("Pooled output shape:", result[1].shape)
manual_classifier = nlp.modeling.models.BertClassifier(
bert_encoder,
num_classes=2,
dropout_rate=bert_encoder_config['dropout_rate'],
initializer=bert_encoder_config['initializer'])
manual_classifier(my_examples, training=True).numpy()
optimizer = nlp.optimization.create_optimizer(
2e-5, num_train_steps=num_train_steps, num_warmup_steps=warmup_steps)
epochs = 3
batch_size = 32
eval_batch_size = 32
train_data_size = len(glue_train_labels)
steps_per_epoch = int(train_data_size / batch_size)
num_train_steps = steps_per_epoch * epochs
decay_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=2e-5,
decay_steps=num_train_steps,
end_learning_rate=0)
plt.plot([decay_schedule(n) for n in range(num_train_steps)])
warmup_steps = num_train_steps * 0.1
warmup_schedule = nlp.optimization.WarmUp(
initial_learning_rate=2e-5,
decay_schedule_fn=decay_schedule,
warmup_steps=warmup_steps)
# The warmup overshoots, because it warms up to the `initial_learning_rate`
# following the original implementation. You can set
# `initial_learning_rate=decay_schedule(warmup_steps)` if you don't like the
# overshoot.
plt.plot([warmup_schedule(n) for n in range(num_train_steps)])
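# Illustrative sketch of the alternative mentioned in the comment above (not used for
# training here): starting the warmup at decay_schedule(warmup_steps) avoids the overshoot.
no_overshoot_schedule = nlp.optimization.WarmUp(
    initial_learning_rate=decay_schedule(warmup_steps),
    decay_schedule_fn=decay_schedule,
    warmup_steps=warmup_steps)
plt.plot([no_overshoot_schedule(n) for n in range(num_train_steps)])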
optimizer = nlp.optimization.AdamWeightDecay(
learning_rate=warmup_schedule,
weight_decay_rate=0.01,
epsilon=1e-6,
exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imports
Step2: Resources
Step3: You can get a pre-trained BERT encoder from TensorFlow Hub
Step4: The data
Step5: The info object describes the dataset and its features
Step6: The two classes are
Step7: Here is one example from the training set
Step8: The BERT tokenizer
Step9: Tokenize a sentence
Step10: Preprocess the data
Step11: Start by encoding all the sentences while appending a [SEP] token, and packing them into ragged-tensors
Step12: Now prepend a [CLS] token, and concatenate the ragged tensors to form a single input_word_ids tensor for each example. RaggedTensor.to_tensor() zero pads to the longest sequence.
Step13: Mask and input type
Step14: The "input type" also has the same shape, but inside the non-padded region, contains a 0 or a 1 indicating which sentence the token is a part of.
Step15: Put it all together
Step16: Each subset of the data has been converted to a dictionary of features, and a set of labels. Each feature in the input dictionary has the same shape, and the number of labels should match
Step17: The model
Step18: The config defines the core BERT Model, which is a Keras model to predict the outputs of num_classes from the inputs with maximum sequence length max_seq_length.
Step19: The classifier has three inputs and one output
Step20: Run it on a test batch of data 10 examples from the training set. The output is the logits for the two classes
Step21: The TransformerEncoder in the center of the classifier above is the bert_encoder.
Step22: Restore the encoder weights
Step23: Note
Step24: This returns an AdamWeightDecay optimizer with the learning rate schedule set
Step25: To see an example of how to customize the optimizer and its schedule, see the Optimizer schedule appendix.
Step26: Now run the fine-tuned model on a custom example to see that it works.
Step27: The model should report class 1 "match" for the first example and class 0 "no-match" for the second
Step28: Save the model
Step29: Appendix
Step30: Then apply the transformation to generate new TFRecord files.
Step31: Finally create tf.data input pipelines from those TFRecord files
Step32: The resulting tf.data.Datasets return (features, labels) pairs, as expected by keras.Model.fit
Step34: Create tf.data.Dataset for training and evaluation
Step35: <a id="hub_bert"></a>
Step36: Test run it on a batch of data
Step37: At this point it would be simple to add a classification head yourself.
Step38: The one downside to loading this model from TFHub is that the structure of internal keras layers is not restored. So it's more difficult to inspect or modify the model. The BertEncoder model is now a single layer
Step39: <a id="model_builder_functions"></a>
Step40: Restore the weights
Step41: Test run it
Step42: Wrap it in a classifier
Step43: <a id="optimizer_schedule"></a>
Step44: That high level wrapper sets up the learning rate schedules and the optimizer.
Step45: This, in turn is wrapped in a WarmUp schedule that linearly increases the learning rate to the target value over the first 10% of training
Step46: Then create the nlp.optimization.AdamWeightDecay using that schedule, configured for the BERT model
|
13,936
|
<ASSISTANT_TASK:>
Python Code:
#code for making artificial dataset
import random
def swap_two_characters(seq):
'''define a function that swaps two characters at random positions in a string '''
line = list(seq)
id_i = random.randint(0,len(line)-1)
id_j = random.randint(0,len(line)-1)
line[id_i], line[id_j] = line[id_j], line[id_i]
return ''.join(line)
def swap_characters(seed, n):
seq=seed
for i in range(n):
seq = swap_two_characters(seq)
return seq
def make_seed(start=0, end=26):
seq = ''.join([str(unichr(97+i)) for i in range(start,end)])
return swap_characters(seq, end-start)
def make_dataset(n_sequences=None, seed=None, n_swaps=None):
seqs = []
seqs.append( seed )
for i in range(n_sequences):
seq = swap_characters( seed, n_swaps )
seqs.append( seq )
return seqs
def random_capitalize(seqs, p=0.5):
new_seqs=[]
for seq in seqs:
new_seq = [c.upper() if random.random() < p else c for c in seq ]
new_seqs.append(''.join(new_seq))
return new_seqs
def make_artificial_dataset(sequence_length=None, n_sequences=None, n_swaps=None):
seed = make_seed(start=0, end=sequence_length)
print 'Seed: ',seed
seqs = make_dataset(n_sequences=n_sequences, seed=seed, n_swaps=n_swaps)
train_seqs_orig=seqs[:len(seqs)/2]
test_seqs_orig=seqs[len(seqs)/2:]
seqs = random_capitalize(seqs, p=0.5)
print 'Sample with random capitalization:',seqs[:7]
train_seqs=seqs[:len(seqs)/2]
test_seqs=seqs[len(seqs)/2:]
return train_seqs_orig, test_seqs_orig, train_seqs, test_seqs
#code to estimate predictive performance on categorical labeled sequences
def discriminative_estimate(train_pos_seqs, train_neg_seqs, test_pos_seqs, test_neg_seqs):
from eden.graph import Vectorizer
vectorizer = Vectorizer(complexity=complexity)
from eden.converter.graph.sequence import sequence_to_eden
iterable_pos = sequence_to_eden(train_pos_seqs)
iterable_neg = sequence_to_eden(train_neg_seqs)
from eden.util import fit, estimate
estimator = fit(iterable_pos,iterable_neg, vectorizer, n_iter_search=n_iter_search)
from eden.converter.graph.sequence import sequence_to_eden
iterable_pos = sequence_to_eden(test_pos_seqs)
iterable_neg = sequence_to_eden(test_neg_seqs)
estimate(iterable_pos, iterable_neg, estimator, vectorizer)
#code to create real vector labels
def make_encoding(encoding_vector_dimension=3, sequence_length=None, noise_size=0.01):
#vector encoding for chars
default_encoding = [0]*encoding_vector_dimension
start=0
end=sequence_length
#take a list of all chars up to 'length'
char_list = [str(unichr(97+i)) for i in range(start,end)]
encodings={}
import numpy as np
codes = np.random.rand(len(char_list),encoding_vector_dimension)
for i, code in enumerate(codes):
c = str(unichr(97+i))
cc = c.upper()
encoding = list(code)
encodings[c] = encoding
#add noise for the encoding of capitalized chars
noise = np.random.rand(encoding_vector_dimension)*noise_size
encodings[cc] = list(code + noise)
return encodings, default_encoding
def make_encodings(n_encodings=3, encoding_vector_dimension=3, sequence_length=None, noise_size=0.01):
encodings=[]
for i in range(1,n_encodings+1):
encoding, default_encoding = make_encoding(encoding_vector_dimension, sequence_length, noise_size=noise_size)
encodings.append(encoding)
return encodings, default_encoding
from eden.util import configure_logging
import logging
configure_logging(logging.getLogger(),verbosity=2)
#problem parameters
random.seed(1)
sequence_length = 8 #sequences length
n_sequences = 50 #num sequences in positive and negative set
n_swaps = 2 #num pairs of chars that are swapped at random
n_iter_search = 30 #num paramter configurations that are evaluated in hyperparameter optimization
complexity = 2 #feature complexity for the vectorizer
n_encodings = 5 #num vector encoding schemes for chars
encoding_vector_dimension = 9 #vector dimension for char encoding
noise_size = 0.05 #amount of random noise
print 'Positive examples:'
train_pos_seqs_orig, test_pos_seqs_orig, train_pos_seqs, test_pos_seqs = make_artificial_dataset(sequence_length,n_sequences,n_swaps)
print 'Negative examples:'
train_neg_seqs_orig, test_neg_seqs_orig, train_neg_seqs, test_neg_seqs = make_artificial_dataset(sequence_length,n_sequences,n_swaps)
%%time
#lets estimate the predictive performance of a classifier over the original sequences
print 'Predictive performance on original sequences'
discriminative_estimate(train_pos_seqs_orig, train_neg_seqs_orig, test_pos_seqs_orig, test_neg_seqs_orig)
print '\n\n'
#lets estimate the predictive performance of a classifier over the capitalized sequences
print 'Predictive performance on sequences with random capitalization'
discriminative_estimate(train_pos_seqs, train_neg_seqs, test_pos_seqs, test_neg_seqs)
#lets make a vector encoding for the chars simply using a random encoding
#and a small amount of noise for the capitalized versions
#we can generate a few encodings and let the algorithm choose the best one.
encodings, default_encoding = make_encodings(n_encodings, encoding_vector_dimension, sequence_length, noise_size)
#lets define the 3 main machines: 1) pre_processor, 2) vectorizer, 3) estimator
#the pre_processor takes the raw format and makes graphs
def pre_processor( seqs, encoding=None, default_encoding=None, **args ):
#convert sequences to path graphs
from eden.converter.graph.sequence import sequence_to_eden
graphs = sequence_to_eden(seqs)
#relabel nodes with corresponding vector encoding
from eden.modifier.graph.vertex_attributes import translate
graphs = translate(graphs, label_map = encoding, default = default_encoding)
return graphs
#the vectorizer takes graphs and makes sparse vectors
from eden.graph import Vectorizer
vectorizer = Vectorizer()
#the estimator takes a sparse data matrix and a target column vector and makes a predictive model
from sklearn.linear_model import SGDClassifier
estimator = SGDClassifier(class_weight='auto', shuffle=True)
#the model takes a pre_processor, a vectorizer, an estimator and returns the predictive model
from eden.model import ActiveLearningBinaryClassificationModel
model = ActiveLearningBinaryClassificationModel(pre_processor=pre_processor,
estimator=estimator,
vectorizer=vectorizer,
fit_vectorizer=True )
#lets define hyper-parameters vaule ranges
from numpy.random import randint
from numpy.random import uniform
pre_processor_parameters={'encoding':encodings, 'default_encoding':[default_encoding]}
vectorizer_parameters={'complexity':[complexity],
'n':randint(3, 20, size=n_iter_search)}
estimator_parameters={'n_iter':randint(5, 100, size=n_iter_search),
'penalty':['l1','l2','elasticnet'],
'l1_ratio':uniform(0.1,0.9, size=n_iter_search),
'loss':['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron'],
'power_t':uniform(0.1, size=n_iter_search),
'alpha': [10**x for x in range(-8,0)],
'eta0': [10**x for x in range(-4,-1)],
'learning_rate': ["invscaling", "constant", "optimal"]}
%%time
#optimize hyperparameters and fit a predictive model
#determine optimal parameter configuration
model.optimize(train_pos_seqs, train_neg_seqs,
model_name='my_seq.model',
n_active_learning_iterations=0,
n_iter=n_iter_search, cv=3,
pre_processor_parameters=pre_processor_parameters,
vectorizer_parameters=vectorizer_parameters,
estimator_parameters=estimator_parameters)
#print optimal parameter configuration
print model.get_parameters()
#evaluate predictive performance
apr, roc = model.estimate(test_pos_seqs, test_neg_seqs)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Artificial data generation
Step2: Discriminative model on categorical labels
Step3: Note
Step4: Model Auto Optimization
|
13,937
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pylab as plt
import oedes
import numpy as np
oedes.init_notebook() # for displaying progress bars
L = 200e-9 # device thickness, m
model = oedes.models.std.electrononly(L, traps=['trap'])
params = {
'T': 300, # K
'electrode0.workfunction': 0, # eV
'electrode1.workfunction': 0, # eV
'electron.energy': 0, # eV
'electron.mu': 1e-9, # m2/(Vs)
'electron.N0': 2.4e26, # 1/m^3
'electron.trap.energy': 0, # eV
'electron.trap.trate': 1e-22, # 1/(m^3 s)
'electron.trap.N0': 6.2e22, # 1/m^3
'electrode0.voltage': 0, # V
'electrode1.voltage': 0, # V
'epsilon_r': 3. # 1
}
trapenergy_sweep = oedes.sweep('electron.trap.energy',np.asarray([-0.45, -0.33, -0.21, 1.]))
voltage_sweep = oedes.sweep('electrode0.voltage', np.logspace(-3, np.log10(20.), 100))
c=oedes.context(model)
for tdepth,ct in c.sweep(params, trapenergy_sweep):
for _ in ct.sweep(ct.params, voltage_sweep):
pass
v,j = ct.teval(voltage_sweep.parameter_name,'J')
oedes.testing.store(j, rtol=1e-3) # for automatic testing
if tdepth < 0:
label = 'no traps'
else:
label = 'trap depth %s eV' % tdepth
plt.plot(v,j,label=label)
plt.xscale('log')
plt.yscale('log')
plt.xlabel('V')
plt.ylabel(r'$\mathrm{A/m^2}$')
plt.legend(loc=0,frameon=False);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model and parameters
Step2: Sweep parameters
Step3: Result
|
13,938
|
<ASSISTANT_TASK:>
Python Code:
# Import essential libraries for following calculation
import libpysal as ps
import numpy as np
from libpysal.cg.shapes import Ring, Polygon
from libpysal.cg.segmentLocator import BruteSegmentLocator
from libpysal.cg.polygonQuadTreeStructure import QuadTreeStructureSingleRing
import libpysal.examples as pysal_examples
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import time
# import codecs
# import shapely
# from pysal.cg.shapes import Polygon, Point
# from shapely.geometry import Polygon as spPolygon
# from shapely.geometry import Point as spPoint
# import time
import random
print("import finished!")
def get_ring_from_file(path):
vertices = []
file = open(path, "r")
for line in file:
if len(line)<2:
continue
coordinates = line.split(",")
x = (float)(coordinates[0])
y = (float)(coordinates[1])
vertices.append((x, y))
file.close()
return vertices
# Prepare the polygons for future use.
Texas = Polygon(get_ring_from_file("data/texas_points.txt"))
Pecos = Polygon(get_ring_from_file("data/pecos_points.txt"))
San_Saba = Polygon(get_ring_from_file("data/san_saba_points.txt"))
Texas_with_holes = Polygon(Texas.vertices,
[Pecos.vertices, San_Saba.vertices])
# construct the quadtree structure by explicitly calling the function "build_quad_tree_structure" in polygon
Texas.build_quad_tree_structure()
print "quad tree build finished"
# create some random point and test if these points locate in the polygon
for i in range(0, 10):
x = random.uniform(Texas.bbox[0], Texas.bbox[2])
y = random.uniform(Texas.bbox[1], Texas.bbox[3])
print(x, y, Texas.contains_point([x, y]))
# The region of Texas, to make the steps more clear, here we only use the main region
texas_main_vertices = Texas.parts[0]
fig, ax = plt.subplots(figsize=(16,11))
qts_singlering = QuadTreeStructureSingleRing(Ring(texas_main_vertices))
patches = []
color_array = []
cells_to_draw = [qts_singlering.root_cell]
while len(cells_to_draw) > 0:
cell = cells_to_draw.pop()
if cell.children_l_b is None:
# this is a leaf in the quad tree structure, draw it
verts = [
[cell.min_x, cell.min_y],
[cell.min_x, cell.min_y + cell.length_y],
[cell.min_x + cell.length_x, cell.min_y + cell.length_y],
[cell.min_x + cell.length_x, cell.min_y],
[cell.min_x, cell.min_y]
]
patches.append(verts)
if cell.status == "in":
color_array.append("#c8e6c9") # in color green
elif cell.status == "out":
color_array.append("#b0bec5") # in color grey
else: # means "maybe"
color_array.append("#ffa726") # in color orange
else:
cells_to_draw.append(cell.children_l_b)
cells_to_draw.append(cell.children_l_t)
cells_to_draw.append(cell.children_r_b)
cells_to_draw.append(cell.children_r_t)
coll = matplotlib.collections.PolyCollection(np.array(patches), facecolors=color_array, edgecolors='#eceff1')
ax.add_collection(coll)
point_x_list = []
point_y_list = []
for point in texas_main_vertices:
point_x_list.append(point[0])
point_y_list.append(point[1])
plt.plot(point_x_list, point_y_list)
ax.autoscale_view()
plt.show()
plt.figure(figsize=(16,12))
for vertices in Texas_with_holes.parts:
line_x_list = []
line_y_list = []
for point in vertices:
line_x_list.append(point[0])
line_y_list.append(point[1])
plt.plot(line_x_list, line_y_list, c="#6a1b9a")
for vertices in Texas_with_holes.holes:
line_x_list = []
line_y_list = []
for point in vertices:
line_x_list.append(point[0])
line_y_list.append(point[1])
plt.plot(line_x_list, line_y_list, c="#1565c0")
point_x_list = []
point_y_list = []
point_colors = []
bbox = Texas_with_holes.bbox
for i in range(0, 1000):
x = random.uniform(bbox[0], bbox[2])
y = random.uniform(bbox[1], bbox[3])
point_x_list.append(x)
point_y_list.append(y)
if Texas_with_holes.contains_point([x, y]):
point_colors.append("#e57373") # inside, red
else:
point_colors.append("#4db6ac") # outside, green
plt.scatter(point_x_list, point_y_list, c = point_colors, linewidth = 0)
plt.show()
# construct a study area with 3000+ vertices
Huangshan = Polygon(get_ring_from_file("data/study_region_huangshan_point.txt"))
points = []
bbox = Huangshan.bounding_box
for i in range(0, 10000):
x = random.uniform(bbox[0], bbox[2])
y = random.uniform(bbox[1], bbox[3])
points.append((x, y))
print(str(len(points)) + " random points generated")
print("------------------------------")
print("Begin test without quad-tree-structure")
time_begin = int(round(time.time() * 1000))
for point in points:
Huangshan.contains_point(point)
time_end = int(round(time.time() * 1000))
print "Test without quad-tree-structure finished, time used = " + str((time_end-time_begin)/1000.0) + "s"
print "------------------------------"
print "Begin test with quad-tree-structure"
time_begin = int(round(time.time() * 1000))
Huangshan.build_quad_tree_structure()
count_error = 0
for point in points:
Huangshan.contains_point(point)
time_end = int(round(time.time() * 1000))
print "Test with quad-tree-structure finished, time used = " + str((time_end-time_begin)/1000.0) + "s"
# polygons = ps.open("data/Huangshan_region.shp") # read the research region shape file
# research_region = polygons[0] # set the first polygon as research polygon
# len(research_region.vertices)
vertices = get_ring_from_file("data/study_region_huangshan_point.txt")
print "Study region read finished, with vertices of " + str(len(vertices))
huangshan = Ring(vertices)
points = []
bbox = huangshan.bounding_box
for i in range(0, 1000):
x = random.uniform(bbox[0], bbox[2])
y = random.uniform(bbox[1], bbox[3])
points.append([x, y, True])
print(str(len(points)) + " random points generated")
# First, test if these points are inside of the polygon by using the conventional method, record the result
for point in points:
is_in = huangshan.contains_point((point[0], point[1]))
point[2] = is_in
# Then, build the quad-tree and do the test again. Compare the results of two methods.
count_error = 0
huangshan.build_quad_tree_structure()
for point in points:
is_in = huangshan.contains_point((point[0], point[1]))
if point[2] != is_in:
print "Error found!!!"
count_error += 1
if count_error == 0:
    print("finished ==================== no error found")
else:
    print("finished ==================== ERROR FOUND")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How to Use
Step2: The process of building quadtree
Step3: Visualizing the result of "Point in Polygon" test
Step4: Test the performance of this quad-tree-structure
Step5: Validate the correctness of this quad-tree-structure
|
13,939
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
# RMS Titanic data visualization code
from titanic_visualizations import survival_stats
from IPython.display import display
%matplotlib inline
# Load the dataset
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# Print the first few entries of the RMS Titanic data
display(full_data.head())
# Store the 'Survived' feature in a new variable and remove it from the dataset
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis = 1)
# Show the new dataset with 'Survived' removed
display(data.head())
def accuracy_score(truth, pred):
    """Returns accuracy score for input truth and predictions."""
# Ensure that the number of predictions matches number of outcomes
if len(truth) == len(pred):
# Calculate and return the accuracy as a percent
return "Predictions have an accuracy of {:.2f}%.".format((truth == pred).mean()*100)
else:
return "Number of predictions does not match number of outcomes!"
# Test the 'accuracy_score' function
predictions = pd.Series(np.ones(5, dtype = int))
print accuracy_score(outcomes[:5], predictions)
def predictions_0(data):
    """Model with no features. Always predicts a passenger did not survive."""
predictions = []
for _, passenger in data.iterrows():
# Predict the survival of 'passenger'
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_0(data)
print accuracy_score(outcomes, predictions)
survival_stats(data, outcomes, 'Sex')
def predictions_1(data):
    """Model with one feature:
        - Predict a passenger survived if they are female."""
predictions = []
for _, passenger in data.iterrows():
res = 1
if passenger['Sex'] != 'female':
res = 0
predictions.append(res)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_1(data)
print accuracy_score(outcomes, predictions)
survival_stats(data, outcomes, 'Age', ["Sex == 'male'"])
def predictions_2(data):
    """Model with two features:
        - Predict a passenger survived if they are female.
        - Predict a passenger survived if they are male and younger than 10."""
predictions = []
for _, passenger in data.iterrows():
res = 0
if passenger['Sex'] == 'female':
res = 1
elif passenger['Sex'] == 'male' and passenger['Age'] < 10:
res = 1
predictions.append(res)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_2(data)
print accuracy_score(outcomes, predictions)
survival_stats(data, outcomes, 'SibSp', ["Sex == 'female'", "Pclass > 2", "SibSp < 1"])
def predictions_3(data):
    """Model with multiple features. Makes a prediction with an accuracy of at least 80%."""
predictions = []
for _, passenger in data.iterrows():
'''
        The 'Sex' variable has the main influence on the survival rate.
        Within each value of 'Sex', different features matter.
        For 'female' passengers, the 'Pclass' and 'SibSp' variables are the better predictors of survival.
        For 'male' passengers, the influence comes from 'Fare' and 'Age', according to this exploration.
        In summary, the prediction follows this flow:
            female -> (Pclass, SibSp)
            male -> (Fare, Age)
'''
res = 0
if passenger['Sex'] == 'female':
# For females, the Pclass has a lot of survival information
if passenger['Pclass'] in (1,2):
res = 1
# for female in Pclass 3, those with SibSp = 0, there is a large chance of survival
elif passenger['SibSp'] == 0:
res = 1
# For male, the variable fare has influence in survival
if passenger['Sex'] == 'male':
fare = passenger['Fare']
# Age
if passenger["Age"] < 10:
res = 1
# Fare variable has relation with survival rate in male
elif fare >= 120 and fare < 200:
res = 1
predictions.append(res)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_3(data)
print accuracy_score(outcomes, predictions)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From a sample of the RMS Titanic data, we can see the various features present for each passenger on the ship
Step3: The very same sample of the RMS Titanic data now shows the Survived feature removed from the DataFrame. Note that data (the passenger data) and outcomes (the outcomes of survival) are now paired. That means for any passenger data.loc[i], they have the survival outcome outcome[i].
Step5: Tip
Step6: Question 1
Step7: Answer
Step9: Examining the survival statistics, a large majority of males did not survive the ship sinking. However, a majority of females did survive the ship sinking. Let's build on our previous prediction
Step10: Question 2
Step11: Answer
Step13: Examining the survival statistics, the majority of males younger than 10 survived the ship sinking, whereas most males age 10 or older did not survive the ship sinking. Let's continue to build on our previous prediction
Step14: Question 3
Step15: Answer
Step17: After exploring the survival statistics visualization, fill in the missing code below so that the function will make your prediction.
Step18: Question 4
|
13,940
|
<ASSISTANT_TASK:>
Python Code:
data_agr = pd.read_csv('CrowdstormingDataJuly1st_aggregated_encoded.csv')
data_agr.head()
data_agr = data_agr.drop(['playerShort', 'player'], axis=1)
data_train = data_agr.drop(['color_rating'], axis=1)
colors = data_agr['color_rating']
col = data_train.columns
data_train = pd.DataFrame(pp.normalize(data_train))
data_train.columns = col
data_train.head()
from sklearn import metrics
from sklearn.cluster import KMeans
np.random.seed(1)
def wrong_pred(ratings, labels):
    """Returns the fraction of wrong predictions (using the better of the two possible cluster-label assignments)."""
ratings = ratings.apply(lambda x: mapping(x))
dif = np.abs(ratings - labels)
# the difference between the ratings and labels has be 1 to count a as wrong prediction
return (min(len(dif[dif==1]), len(dif[dif==0]))/len(labels))
def mapping(x):
if (x < 3):
return 0
if (x == 3):
return 0.5
return 1
def fit_data(data):
kmeans = KMeans(init='k-means++', n_clusters=2, n_init=1)
kmeans.fit(data)
silhouette = metrics.silhouette_score(data, kmeans.labels_, metric='euclidean')
skin = wrong_pred(colors, kmeans.labels_)
return silhouette, skin
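# Illustrative baseline (not part of the original analysis): silhouette and error rate
# when clustering on all normalized features, before any greedy feature removal.
baseline_silhouette, baseline_skin = fit_data(data_train)
print('baseline silhouette: {:.3f}, wrong predictions: {:.3f}'.format(baseline_silhouette, baseline_skin))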
silhouettes_scores = []
skin_scores = []
d = data_train
globalbest = -1
globalbest_feature = data_train.columns
while (len(d.columns) > 1):
current_best = -1
for feature in d:
data_temp = d.drop([feature], axis=1)
silhouette, skin = fit_data(data_temp)
if (silhouette > current_best):
current_best = silhouette
current_skin = skin
worst_feature = feature
if(silhouette > globalbest):
globalbest = silhouette
globalbest_feature = data_temp.columns
silhouettes_scores.append(current_best)
skin_scores.append(current_skin)
print('worst feature is \"' + worst_feature + '\" without it silhouette is ' + "%.3f" % current_best)
print('dark_light prediction made ' + "%.3f" % current_skin + '% wrong prediction' )
print('')
d = d.drop([worst_feature], axis=1)
print('the features with the best silhouette score is/are ' + str(globalbest_feature))
length = range(len(data_train.columns) -1 , 0, -1)
plt.plot(length, silhouettes_scores, label='silhouette')
plt.plot(length, 1 - np.array(skin_scores) , label = 'skin score')
plt.xlabel('Number of features')
plt.gca().invert_xaxis()
plt.legend()
kmeans = KMeans(init='k-means++', n_clusters=2, n_init=1)
kmeans.fit(data_agr.seExp.to_frame())
labels = kmeans.labels_
plt.scatter(data_agr.seExp, data_agr.seExp,c = labels)
plt.scatter(kmeans.cluster_centers_, kmeans.cluster_centers_, c = ['r', 'g'], s=100)
plt.title('Clustering based on seExp')
plt.ylabel('seExp')
plt.xlabel('seExp')
plt.figure(figsize=(2,2))
plt.hist(labels)
print('Number of elements in cluster 0: {} ({:.2f}%)'.format(len(labels[labels == 0]), len(labels[labels == 0]) / len(labels) * 100))
print('Number of elements in cluster 1: {} ({:.2f}%)'.format(len(labels[labels == 1]), len(labels[labels == 1]) / len(labels) * 100))
plt.figure(figsize=(4,4))
plt.hist(data_agr[labels == 0].seExp)
kmeans.cluster_centers_[0]
colors = data_agr.color_rating
print('Arruracy: {:.2f} %'.format( 100- wrong_pred(colors, labels) * 100))
colors3 = colors.apply(lambda x: mapping(x))
plt.hist(colors3)
print('Proportion of White: {:.2f}%'.format(len(colors3[colors3==0]) / len(colors3) * 100))
print('Proportion of Neutral: {:.2f}%'.format(len(colors3[colors3==0.5]) / len(colors3) * 100))
print('Proportion of Black: {:.2f}%'.format(len(colors3[colors3==1]) / len(colors3) * 100))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We drop the features that are unique to the players and we normalize them. That way all the features will be in [-1;1]. We also remove the color_ratings from the training data.
Step2: KMeans
Step4: assumption
Step5: To find the best set of features for which the silhouette score is maximal, we would have an exponential amount of sets to test. This is clearly not feasible. We have therefore decided to use a greedy strategy to find an approximation of the optimal.
Step6: Observation of the results
Step7: Then we plot the points according to seExp, together with the centers of the 2 clusters
Step8: First of all, we can see that we have "separate clusters", but it is not obvious that these 2 clusters should give a silhouette score over 0.90. Let's remember what the silhouette score is (according to the scikit-learn documentation)
Step9: OK, so almost all the points are in cluster 0
Step10: We have a lot of points concentrated close to 0. Let's see the value of the center of cluster 0
Step11: Almost all the points are close to the center, so cluster 0 is essentially represented by a single point; this is why we get such a high silhouette score
Step12: Remember that we assume a player with color rating N (value 3 in our case) cannot be mis-clustered, since we cannot determine whether they are more white or more black. So let's look at the distribution of white, neutral and black players
|
13,941
|
<ASSISTANT_TASK:>
Python Code:
print_synonyms('dx::440.0', model)
#Crohn's Disease
print_synonyms('dx::555.9', model)
print_synonyms_filt('dx::042', model, 'rx')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Peptic Ulcers
Step2: Arthritis
|
13,942
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from google.cloud import bigquery
PROJECT = !gcloud config get-value project
PROJECT = PROJECT[0]
%env PROJECT=$PROJECT
def create_query(phase, sample_size):
    basequery = """
    SELECT
    (tolls_amount + fare_amount) AS fare_amount,
    EXTRACT(DAYOFWEEK from pickup_datetime) AS dayofweek,
    EXTRACT(HOUR from pickup_datetime) AS hourofday,
    pickup_longitude AS pickuplon,
    pickup_latitude AS pickuplat,
    dropoff_longitude AS dropofflon,
    dropoff_latitude AS dropofflat
    FROM
    `nyc-tlc.yellow.trips`
    WHERE
    trip_distance > 0
    AND fare_amount >= 2.5
    AND pickup_longitude > -78
    AND pickup_longitude < -70
    AND dropoff_longitude > -78
    AND dropoff_longitude < -70
    AND pickup_latitude > 37
    AND pickup_latitude < 45
    AND dropoff_latitude > 37
    AND dropoff_latitude < 45
    AND passenger_count > 0
    AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), EVERY_N)) = 1
    """
    if phase == "TRAIN":
        subsample = """
        AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), EVERY_N * 100)) >= (EVERY_N * 0)
        AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), EVERY_N * 100)) < (EVERY_N * 70)
        """
    elif phase == "VALID":
        subsample = """
        # TODO: Your code goes here
        """
    elif phase == "TEST":
        subsample = """
        # TODO: Your code goes here
        """
    query = basequery + subsample
    return query.replace("EVERY_N", sample_size)
bq = bigquery.Client(project=PROJECT)
for phase in ["TRAIN", "VALID", "TEST"]:
# 1. Create query string
query_string = None # TODO: Your code goes here
# 2. Load results into DataFrame
df = None # TODO: Your code goes here
# 3. Write DataFrame to CSV
df.to_csv(f"taxi-{phase.lower()}.csv", index_label=False, index=False)
print("Wrote {} lines to {}".format(len(df), f"taxi-{phase.lower()}.csv"))
!ls -l *.csv
!head taxi-train.csv
def euclidean_distance(df):
return # TODO: Your code goes here
def compute_rmse(actual, predicted):
return # TODO: Your code goes here
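# Hedged sketch (the two helpers above are intentionally left as TODOs in this lab;
# one plausible implementation, treating lat/lon degrees as planar coordinates):
# def euclidean_distance(df):
#     return ((df["dropofflon"] - df["pickuplon"]) ** 2 +
#             (df["dropofflat"] - df["pickuplat"]) ** 2) ** 0.5
# def compute_rmse(actual, predicted):
#     return ((actual - predicted) ** 2).mean() ** 0.5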
def print_rmse(df, rate, name):
print(
"{} RMSE = {}".format(
compute_rmse(df["fare_amount"], rate * euclidean_distance(df)),
name,
)
)
df_train = pd.read_csv("taxi-train.csv")
df_valid = pd.read_csv("taxi-valid.csv")
rate = df_train["fare_amount"].mean() / euclidean_distance(df_train).mean()
print_rmse(df_train, rate, "Train")
print_rmse(df_valid, rate, "Valid")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: Review
Step6: Write to CSV
Step7: Note that even with a 1/5000th sample we have a good amount of data for ML. 150K training examples and 30K validation.
Step8: Preview one of the files
Step9: Looks good! We now have our ML datasets and are ready to train ML models, validate them and test them.
|
13,943
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import collections
import io
import math
import os
import random
from six.moves import urllib
from IPython.display import clear_output, Image, display, HTML
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow_hub as hub
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as sk_metrics
import time
FLOWERS_DIR = './flower_photos'
TRAIN_FRACTION = 0.8
RANDOM_SEED = 2018
def download_images():
  """If the images aren't already downloaded, save them to FLOWERS_DIR."""
if not os.path.exists(FLOWERS_DIR):
DOWNLOAD_URL = 'http://download.tensorflow.org/example_images/flower_photos.tgz'
print('Downloading flower images from %s...' % DOWNLOAD_URL)
urllib.request.urlretrieve(DOWNLOAD_URL, 'flower_photos.tgz')
!tar xfz flower_photos.tgz
print('Flower photos are located in %s' % FLOWERS_DIR)
def make_train_and_test_sets():
  """Split the data into train and test sets and get the label classes."""
train_examples, test_examples = [], []
shuffler = random.Random(RANDOM_SEED)
is_root = True
for (dirname, subdirs, filenames) in tf.gfile.Walk(FLOWERS_DIR):
# The root directory gives us the classes
if is_root:
subdirs = sorted(subdirs)
classes = collections.OrderedDict(enumerate(subdirs))
label_to_class = dict([(x, i) for i, x in enumerate(subdirs)])
is_root = False
# The sub directories give us the image files for training.
else:
filenames.sort()
shuffler.shuffle(filenames)
full_filenames = [os.path.join(dirname, f) for f in filenames]
label = dirname.split('/')[-1]
label_class = label_to_class[label]
# An example is the image file and it's label class.
examples = list(zip(full_filenames, [label_class] * len(filenames)))
num_train = int(len(filenames) * TRAIN_FRACTION)
train_examples.extend(examples[:num_train])
test_examples.extend(examples[num_train:])
shuffler.shuffle(train_examples)
shuffler.shuffle(test_examples)
return train_examples, test_examples, classes
# Download the images and split the images into train and test sets.
download_images()
TRAIN_EXAMPLES, TEST_EXAMPLES, CLASSES = make_train_and_test_sets()
NUM_CLASSES = len(CLASSES)
print('\nThe dataset has %d label classes: %s' % (NUM_CLASSES, CLASSES.values()))
print('There are %d training images' % len(TRAIN_EXAMPLES))
print('there are %d test images' % len(TEST_EXAMPLES))
#@title Show some labeled images
def get_label(example):
  """Get the label (number) for given example."""
return example[1]
def get_class(example):
  """Get the class (string) of given example."""
return CLASSES[get_label(example)]
def get_encoded_image(example):
  """Get the image data (encoded jpg) of given example."""
image_path = example[0]
return tf.gfile.GFile(image_path, 'rb').read()
def get_image(example):
  """Get image as np.array of pixels for given example."""
return plt.imread(io.BytesIO(get_encoded_image(example)), format='jpg')
def display_images(images_and_classes, cols=5):
  """Display given images and their labels in a grid."""
rows = int(math.ceil(len(images_and_classes) / cols))
fig = plt.figure()
fig.set_size_inches(cols * 3, rows * 3)
for i, (image, flower_class) in enumerate(images_and_classes):
plt.subplot(rows, cols, i + 1)
plt.axis('off')
plt.imshow(image)
plt.title(flower_class)
NUM_IMAGES = 15 #@param {type: 'integer'}
display_images([(get_image(example), get_class(example))
for example in TRAIN_EXAMPLES[:NUM_IMAGES]])
LEARNING_RATE = 0.01
tf.reset_default_graph()
# Load a pre-trained TF-Hub module for extracting features from images. We've
# chosen this particular module for speed, but many other choices are available.
image_module = hub.Module('https://tfhub.dev/google/imagenet/mobilenet_v2_035_128/feature_vector/2')
# Preprocessing images into tensors with size expected by the image module.
encoded_images = tf.placeholder(tf.string, shape=[None])
image_size = hub.get_expected_image_size(image_module)
def decode_and_resize_image(encoded):
decoded = tf.image.decode_jpeg(encoded, channels=3)
decoded = tf.image.convert_image_dtype(decoded, tf.float32)
return tf.image.resize_images(decoded, image_size)
batch_images = tf.map_fn(decode_and_resize_image, encoded_images, dtype=tf.float32)
# The image module can be applied as a function to extract feature vectors for a
# batch of images.
features = image_module(batch_images)
def create_model(features):
  """Build a model for classification from extracted features."""
# Currently, the model is just a single linear layer. You can try to add
# another layer, but be careful... two linear layers (when activation=None)
# are equivalent to a single linear layer. You can create a nonlinear layer
# like this:
# layer = tf.layers.dense(inputs=..., units=..., activation=tf.nn.relu)
layer = tf.layers.dense(inputs=features, units=NUM_CLASSES, activation=None)
return layer
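# Illustrative sketch only (not used below): a variant of create_model with one
# nonlinear hidden layer, as suggested in the comment above. The hidden size of 100
# units is an arbitrary assumption.
def create_model_with_hidden_layer(features, hidden_units=100):
  hidden = tf.layers.dense(inputs=features, units=hidden_units, activation=tf.nn.relu)
  return tf.layers.dense(inputs=hidden, units=NUM_CLASSES, activation=None)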
# For each class (kind of flower), the model outputs some real number as a score
# how much the input resembles this class. This vector of numbers is often
# called the "logits".
logits = create_model(features)
labels = tf.placeholder(tf.float32, [None, NUM_CLASSES])
# Mathematically, a good way to measure how much the predicted probabilities
# diverge from the truth is the "cross-entropy" between the two probability
# distributions. For numerical stability, this is best done directly from the
# logits, not the probabilities extracted from them.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
# Let's add an optimizer so we can train the network.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=LEARNING_RATE)
train_op = optimizer.minimize(loss=cross_entropy_mean)
# The "softmax" function transforms the logits vector into a vector of
# probabilities: non-negative numbers that sum up to one, and the i-th number
# says how likely the input comes from class i.
probabilities = tf.nn.softmax(logits)
# We choose the highest one as the predicted class.
prediction = tf.argmax(probabilities, 1)
correct_prediction = tf.equal(prediction, tf.argmax(labels, 1))
# The accuracy will allow us to eval on our test set.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# How long will we train the network (number of batches).
NUM_TRAIN_STEPS = 100 #@param {type: 'integer'}
# How many training examples we use in each step.
TRAIN_BATCH_SIZE = 10 #@param {type: 'integer'}
# How often to evaluate the model performance.
EVAL_EVERY = 10 #@param {type: 'integer'}
def get_batch(batch_size=None, test=False):
  """Get a random batch of examples."""
examples = TEST_EXAMPLES if test else TRAIN_EXAMPLES
batch_examples = random.sample(examples, batch_size) if batch_size else examples
return batch_examples
def get_images_and_labels(batch_examples):
images = [get_encoded_image(e) for e in batch_examples]
one_hot_labels = [get_label_one_hot(e) for e in batch_examples]
return images, one_hot_labels
def get_label_one_hot(example):
  """Get the one hot encoding vector for the example."""
one_hot_vector = np.zeros(NUM_CLASSES)
np.put(one_hot_vector, get_label(example), 1)
return one_hot_vector
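# Quick illustrative check (not in the original notebook): a label of 2 should map to
# the one-hot vector [0., 0., 1., 0., 0.] when NUM_CLASSES is 5.
print(get_label_one_hot((TRAIN_EXAMPLES[0][0], 2)))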
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(NUM_TRAIN_STEPS):
# Get a random batch of training examples.
train_batch = get_batch(batch_size=TRAIN_BATCH_SIZE)
batch_images, batch_labels = get_images_and_labels(train_batch)
# Run the train_op to train the model.
train_loss, _, train_accuracy = sess.run(
[cross_entropy_mean, train_op, accuracy],
feed_dict={encoded_images: batch_images, labels: batch_labels})
is_final_step = (i == (NUM_TRAIN_STEPS - 1))
if i % EVAL_EVERY == 0 or is_final_step:
# Get a batch of test examples.
test_batch = get_batch(batch_size=None, test=True)
batch_images, batch_labels = get_images_and_labels(test_batch)
# Evaluate how well our model performs on the test set.
test_loss, test_accuracy, test_prediction, correct_predicate = sess.run(
[cross_entropy_mean, accuracy, prediction, correct_prediction],
feed_dict={encoded_images: batch_images, labels: batch_labels})
print('Test accuracy at step %s: %.2f%%' % (i, (test_accuracy * 100)))
def show_confusion_matrix(test_labels, predictions):
  """Compute confusion matrix and normalize."""
confusion = sk_metrics.confusion_matrix(
np.argmax(test_labels, axis=1), predictions)
confusion_normalized = confusion.astype("float") / confusion.sum(axis=1)
axis_labels = list(CLASSES.values())
ax = sns.heatmap(
confusion_normalized, xticklabels=axis_labels, yticklabels=axis_labels,
cmap='Blues', annot=True, fmt='.2f', square=True)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
show_confusion_matrix(batch_labels, test_prediction)
incorrect = [
(example, CLASSES[prediction])
for example, prediction, is_correct in zip(test_batch, test_prediction, correct_predicate)
if not is_correct
]
display_images(
[(get_image(example), "prediction: {0}\nlabel:{1}".format(incorrect_prediction, get_class(example)))
for (example, incorrect_prediction) in incorrect[:20]])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Classify flowers with transfer learning
Step4: The Flowers dataset
Step10: Inspect the data
Step12: Build the model
Step16: Train the network
Step17: Incorrect predictions
|
13,944
|
<ASSISTANT_TASK:>
Python Code:
import torch
from torch.autograd import Variable
x = Variable(torch.Tensor([[1], [2], [3]]))
y = Variable(torch.Tensor([[1], [2], [3]]))
w = Variable(torch.randn(1, 1), requires_grad = True)
b = Variable(torch.randn(1), requires_grad = True)
learning_rate = 1e-2
# training
for i in range(1000) :
# network model
y_pred = torch.mm(x, w)
y_pred += b.unsqueeze(1).expand_as(y_pred)
loss = (y_pred - y).pow(2).sum()
    # initialize network model parameter grads (they are None until the first backward pass)
    if w.grad is not None: w.grad.data.zero_()
    if b.grad is not None: b.grad.data.zero_()
# update w, b
loss.backward()
w.data -= learning_rate * w.grad.data
b.data -= learning_rate * b.grad.data
# training result
print ('w = ', w.data[0][0], ', b=', b.data[0])
import torch
from torch.autograd import Variable
x = Variable(torch.Tensor([[1], [2], [3]]))
y = Variable(torch.Tensor([[1], [2], [3]]))
w = Variable(torch.randn(1, 1), requires_grad = True)
b = Variable(torch.randn(1), requires_grad = True)
optimizer = torch.optim.Adam((w, b), lr=1e-2)
# training
for i in range(1000) :
# network model
y_pred = torch.mm(x, w)
y_pred += b.unsqueeze(1).expand_as(y_pred)
loss = (y_pred - y).pow(2).sum()
    # initialize network model parameter grads
    optimizer.zero_grad()
    # optimizer step
loss.backward()
optimizer.step()
# training result
print ('w = ', w.data[0][0], ', b=', b.data[0])
import torch
from torch.autograd import Variable
x = Variable(torch.Tensor([[1], [2], [3]]))
y = Variable(torch.Tensor([[1], [2], [3]]))
# define model, loss function, optimizer
model = torch.nn.Sequential(torch.nn.Linear(1, 1, bias=True))
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
# training
for i in range(1000) :
# network model
y_pred = model(x)
loss = loss_fn(y_pred, y)
# zero_grad, backward, step(update parameter) in series
optimizer.zero_grad()
loss.backward()
optimizer.step()
# training result
print(list(model.parameters()))
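# Illustrative check (not in the original tutorial): the fitted y = w*x + b should map
# an unseen input close to the identity line, e.g. x = 4 -> y close to 4.
print(model(Variable(torch.Tensor([[4.0]]))))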
import tensorflow as tf
x_data = [1, 2, 3]
y_data = [1, 2, 3]
w = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
x = tf.placeholder(tf.float32, name="X")
y = tf.placeholder(tf.float32, name="Y")
# network model, loss function
y_pred = tf.add(tf.multiply(w, x), b)
loss = tf.reduce_mean(tf.square(y_pred - y))
# optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-2)
train_op = optimizer.minimize(loss)
with tf.Session() as sess:
    # initialize network model
    sess.run(tf.global_variables_initializer())
    # training
    for step in range(1000):
        # optimizer step
sess.run(train_op, feed_dict={x: x_data, y: y_data})
print ('w = ', sess.run(w), ', b=', sess.run(b))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Implementation using an optimizer
Step2: Implementation using the functions provided by torch.nn
Step3: Comparison with the TensorFlow code
|
13,945
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
try:
from importlib import reload # python3
except:
pass # python2 (reload in default namespace)
import sys
import shutil
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import flopy
from flopy.utils.geometry import Polygon, LineString, Point
from flopy.utils.reference import SpatialReference
from flopy.export.shapefile_utils import recarray2shp, shp2recarray
from flopy.utils.modpathfile import PathlineFile, EndpointFile
from flopy.utils.reference import epsgRef
ep = epsgRef()
ep.reset()
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))
m = flopy.modflow.Modflow('toy_model', model_ws='temp')
botm = np.zeros((2, 10, 10))
botm[0, :, :] = 1.5
botm[1, 5, 5] = 4 # negative layer thickness!
botm[1, 6, 6] = 4
dis = flopy.modflow.ModflowDis(nrow=10, ncol=10,
nlay=2, delr=100, delc=100,
top=3, botm=botm, model=m)
m.sr = SpatialReference(delr=m.dis.delr * .3048, delc=m.dis.delc * .3048, xul=600000, yul=5170000,
proj4_str='EPSG:26715', rotation=45)
chk = dis.check()
chk.summary_array
get_vertices = m.sr.get_vertices # function to get the referenced vertices for a model cell
geoms = [Polygon(get_vertices(i, j)) for i, j in chk.summary_array[['i', 'j']]]
geoms[0].type
geoms[0].exterior
geoms[0].bounds
geoms[0].plot() # this feature requires descartes
recarray2shp(chk.summary_array, geoms, 'temp/test.shp', epsg=26715)
shutil.copy('temp/test.prj', 'temp/26715.prj')
recarray2shp(chk.summary_array, geoms, 'temp/test.shp', prj='temp/26715.prj')
ra = shp2recarray('temp/test.shp')
ra
ra.geometry[0].plot()
import epsgref
reload(epsgref)
from epsgref import prj
prj
from flopy.utils.reference import getprj, epsgRef
getprj(4326)
reload(epsgref)
from epsgref import prj
for k, v in prj.items():
print('{}:\n{}\n'.format(k, v))
ep = epsgRef()
ep.add(9999, 'junk')
epsgRef.show()
ep.remove(9999)
epsgRef.show()
ep.reset()
reload(epsgref)
from epsgref import prj
prj
len(prj.keys())
pthfile = PathlineFile('../data/mp6/EXAMPLE-3.pathline')
pthdata = pthfile._data.view(np.recarray)
length_mult = 1. # multiplier to convert coordinates from model to real world
rot = 0 # grid rotation
particles = np.unique(pthdata.particleid)
geoms = []
for pid in particles:
ra = pthdata[pthdata.particleid == pid]
x, y = SpatialReference.rotate(ra.x * length_mult,
ra.y * length_mult,
theta=rot)
z = ra.z
geoms.append(LineString(list(zip(x, y, z))))
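# each particle's pathline is now stored as a 3D LineString of (x, y, z) vertices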
geoms[0]
geoms[0].plot()
fig, ax = plt.subplots()
for g in geoms:
g.plot(ax=ax)
ax.autoscale()
ax.set_aspect(1)
eptfile = EndpointFile('../data/mp6/EXAMPLE-3.endpoint')
eptdata = eptfile.get_alldata()
x, y = SpatialReference.rotate(eptdata['x0'] * length_mult,
eptdata['y0'] * length_mult,
theta=rot)
z = eptdata['z0']
geoms = [Point(x[i], y[i], z[i]) for i in range(len(eptdata))]
fig, ax = plt.subplots()
for g in geoms:
g.plot(ax=ax)
ax.autoscale()
ax.set_aspect(2e-6)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: write a numpy record array to a shapefile
Step2: set the spatial reference
Step3: make geometry objects for the cells with errors
Step4: write the shapefile
Step5: read it back in
Step6: How the epsg feature works
Step7: working with the flopy.utils.reference.epsgRef handler
Step8: remove an entry
Step9: start over with a new file
Step10: Other geometry types
Step11: Points
|
13,946
|
<ASSISTANT_TASK:>
Python Code:
import skgstat as skg
from skgstat.util.likelihood import get_likelihood
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import warnings
from time import time
import matplotlib.pyplot as plt
warnings.filterwarnings('ignore')
# use the same dataset as used in GMD paper
c, v = skg.data.pancake(N=300, seed=42).get('sample')
t1 = time()
V = skg.Variogram(c,v, bin_func='scott', maxlag=0.7, fit_func='trf')
t2 = time() # get time for full analysis, including fit
print(f"Processing time: {round((t2 - t1) * 1000)} ms")
print(V)
fig = V.plot()
# base initial guess on separating distance and sample variance
sep_mean = V.distance.mean()
sam_var = V.values.var()
print(f"Mean sep. distance: {sep_mean.round(1)} sample variance: {sam_var.round(1)}")
# create initial guess
# [range ~ mean separating distance, sill ~ sample variance, nugget ~ 10% of sample variance]
p0 = np.array([sep_mean, sam_var, 0.1 * sam_var])
print('initial guess: ', p0.round(1))
# create the bounds to restrict optimization
bounds = [[0, V.bins[-1]], [0, 3*sam_var], [0, 2.9*sam_var]]
print('bounds: ', bounds)
# load the likelihood function for this variogram
likelihood = get_likelihood(V)
# minimize the likelihood function
t3 = time()
res = minimize(likelihood, p0, bounds=bounds, method='SLSQP')
t4 = time()
print(f"Processing time {np.round(t4 - t3, 2)} seconds")
print('initial guess: ', p0.round(1))
print('optimal parameters:', res.x.round(1))
# use 100 steps
x = np.linspace(0, V.bins[-1], 100)
# apply the maximum likelihood fit parameters
y_ml = V.model(x, *res.x)
# apply the trf fit
y_trf = V.fitted_model(x)
# apply Levenberg-Marquardt
V.fit_method = 'lm'
y_lm = V.fitted_model(x)
# apply parameter ml
V.fit_method = 'ml'
y_pml = V.fitted_model(x)
# check if the method-of-moment fits are different
print('Trf and Levenberg-Marquardt identical: ', all(y_lm - y_trf < 0.1))
print('Trf and parameter ML identical: ', all(y_pml - y_trf < 0.1))
plt.plot(V.bins, V.experimental, '.b', label='experimental')
plt.plot(x, y_ml, '-g', label='ML fit (Lark, 2000)')
plt.plot(x, y_trf, '-b', label='SciKit-GStat TRF')
plt.legend(loc='lower right')
#plt.gcf().savefig('compare.pdf', dpi=300)
from scipy.spatial.distance import squareform
from scipy.linalg import inv, det
# define the spherical model only dependent on the range
def f(h, a):
if h >= a:
return 1.
elif h == 0:
return 0.
return (3*h) / (2*a) - 0.5 * (h / a)**3
# create the autocovariance matrix
def get_A(r, s, b, dists):
a = np.array([f(d, r) for d in dists])
A = squareform((s / (s + b)) * (1 - a))
np.fill_diagonal(A, 1)
return A
# likelihood function
def like(r, s, b, z, dists):
A = get_A(r, s, b, dists)
n = len(A)
A_inv = inv(A)
ones = np.ones((n, 1))
z = z.reshape(n, -1)
m = inv(ones.T @ A_inv @ ones) @ (ones.T @ A_inv @ z)
b = np.log((z - m).T @ A_inv @ (z - m))
d = np.log(det(A))
if d == -np.inf:
print('invalid det(A)')
return np.inf
loglike = (n / 2)*np.log(2*np.pi) + (n / 2) - (n / 2)* np.log(n) + 0.5* d + (n / 2) * b
return loglike.flatten()[0]
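# As written above, the returned value is the negative log-likelihood of Lark (2000):
#   l(r, s, b) = (n/2) ln(2*pi) + n/2 - (n/2) ln(n) + (1/2) ln|A| + (n/2) ln[(z - m)^T A^-1 (z - m)]
# where A is the autocorrelation matrix built by get_A and m is the generalised
# least-squares estimate of the mean computed from A and z above.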
from scipy.optimize import minimize
from scipy.spatial.distance import pdist
# c and v are coordinate and values array from the data source
z = np.array(v)
# for 2D coordinates with the euclidean metric (and no caching), skgstat uses pdist under the hood
dists = pdist(c)
fun = lambda x, *args: like(x[0], x[1], x[2], z=z, dists=dists)
t3 = time()
res = minimize(fun, p0, bounds=bounds)
t4 = time()
print(f"Processing time {np.round(t4 - t3, 2)} seconds")
print('initial guess: ', p0.round(1))
print('optimal parameters:', res.x.round(1))
import matplotlib.pyplot as plt
mod = lambda h: f(h, res.x[0]) * res.x[1] + res.x[2]
x = np.linspace(0, 450, 100)
y = list(map(mod, x))
y2 = V.fitted_model(x)
plt.plot(V.bins, V.experimental, '.b', label='experimental')
plt.plot(x, y, '-g', label='ML fit (Lark, 2000)')
plt.plot(x, y2, '-b', label='SciKit-GStat default fit')
plt.legend(loc='lower right')
plt.gcf().savefig('compare.pdf', dpi=300)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We use the pancake dataset, sampled at 300 random locations to produce a quite dense sample.
Step2: First of, the variogram is calculated. We use Scott's rule to determine the number of lag classes, explicitly set Trust-Region Reflective as fitting method (although its default) and limit the distance matrix to 70% of the maximum separating distance.
Step3: Maximum likelihood using SciKit-GStat
Step4: Next step is to pass the Variogram instance to the function factory. We find optimal parameters by minimizing the returned negative log-likelihood function. Please refer to SciPy's minimize function to learn about attributes. The returned function from the utility suite is built with SciPy in mind, as the function signature complies to SciPy's interface and, thus can just be passed to the minimize function.
Step5: Apply the optimized parameters. For comparison, the three method-of-moment methods from SciKit-GStat are applied as well. Note that the used sample is quite dense, so we do not expect a difference between the MoM-based procedures. They should all find the same parameters.
Step6: Make the result plot
Step7: Build from scratch
|
13,947
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(digits.data,
digits.target)
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
standard_scaler = StandardScaler()
standard_scaler.fit(X_train)
X_train_scaled = standard_scaler.transform(X_train)
svm = SVC().fit(X_train_scaled, y_train)
#pipeline = Pipeline([("scaler", StandardScaler()),
# ("svm", SVC())])
# short version:
pipeline = make_pipeline(StandardScaler(), SVC())
pipeline.fit(X_train, y_train)
pipeline.score(X_test, y_test)
pipeline.predict(X_test)
from sklearn.cross_validation import cross_val_score
cross_val_score(pipeline, X_train, y_train)
import numpy as np
from sklearn.grid_search import GridSearchCV
param_grid = {'svc__C': 10. ** np.arange(-3, 3),
'svc__gamma' : 10. ** np.arange(-3, 3)
}
grid_pipeline = GridSearchCV(pipeline, param_grid=param_grid)
grid_pipeline.fit(X_train, y_train)
grid_pipeline.score(X_test, y_test)
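# After fitting, GridSearchCV also exposes the winning configuration; for example:
print(grid_pipeline.best_params_)  # e.g. {'svc__C': ..., 'svc__gamma': ...}
print(grid_pipeline.best_score_)   # mean cross-validated score of the best estimator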
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For cross-validated pipelines that include scaling, the mean and standard deviation must be estimated separately on each fold.
Step2: Cross-validation with a pipeline
Step3: Grid Search with a pipeline
|
13,948
|
<ASSISTANT_TASK:>
Python Code:
# We really like this spiral dataset
import numpy as np
import matplotlib.pyplot as plt
N = 1000 # points per class
D = 2 # dimensionality at 2 so we can eyeball it
K = 3 # number of classes
X = np.zeros((N*K, D)) # generate an empty matrix to hold X features
y = np.zeros(N*K, dtype='int32') # switching this to int32
# for 3 classes, evenly generates spiral arms
for j in range(K):
ix = range(N*j, N*(j+1))
r = np.linspace(0.0,1,N) #radius
t = np.linspace(j*4, (j+1)*4, N) + np.random.randn(N)*0.2 # theta
X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
y[ix] = j
plt.scatter(X[:,0], X[:,1], c=y, s=20, cmap=plt.cm.Spectral)
plt.show()
import tensorflow as tf
# what should the classifier expect in terms of features
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=D)]
# defining the actual classifier
dnn_spiral_classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
activation_fn = tf.nn.relu,
hidden_units = [20,10], # two hidden layers, with 20 and 10 neurons
n_classes = K, # K target classes
model_dir="/tmp/spiral_model_learning") # directory for saving model checkpoints
# turn data into tensors to feed into the computational graph
# honestly input_fn could also handle these as np.arrays but this is here to show you that the tf.constant operation can run on np.array input
def get_inputs():
X_tensor = tf.constant(X)
y_tensor = tf.constant(y)
return X_tensor, y_tensor
# fit the model
dnn_spiral_classifier.fit(input_fn=get_inputs, steps=50)
#evaluating the accuracy
accuracy_score = dnn_spiral_classifier.evaluate(input_fn=get_inputs,
steps=1)["accuracy"]
from google.datalab.ml import TensorBoard
tb_id = TensorBoard.start("/tmp/spiral_model_learning")
# (optional) restrict logging level to errors, to keep things a little quieter
tf.logging.set_verbosity(tf.logging.ERROR)
for epoch in range(10):
dnn_spiral_classifier.fit(input_fn=get_inputs, steps=100)
dnn_spiral_classifier.evaluate(input_fn=get_inputs, steps=1)["accuracy"]
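# each pass through this loop trains for 100 more steps and logs an evaluation to
# model_dir, so TensorBoard picks up one additional accuracy point per iteration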
# The following line will wipe your tmp records if you want to "reset" everything while configuring your Tensorboard. Use it with caution!
do_I_want_to_reset = False
if do_I_want_to_reset:
tf.gfile.DeleteRecursively('/tmp/spiral_model_learning/')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A much more highly optimized DNNClassifier
Step2: Launching TensorBoard
Step3: Notice that the accuracy only contains one point. That's because we only ran dnn_spiral_classifier.fit and evaluate once and didn't tell it to record data during intermediary steps.
Step4: Refresh your Tensorboard to see the additional points.
|
13,949
|
<ASSISTANT_TASK:>
Python Code:
import pyperclip
import pyautogui
# PyAutoGUI cannot type Chinese text directly, so we paste it from the clipboard instead
# the Python 2 version of pyperclip handles copying Chinese text
def paste(foo):
pyperclip.copy(foo)
pyautogui.hotkey('ctrl', 'v')
foo = u'学而时习之'
# move to the text box and click
pyautogui.click(130,30)
paste(foo)
import pyautogui
screenWidth, screenHeight = pyautogui.size()
pyautogui.moveTo(screenWidth / 2, screenHeight / 2)
import pyautogui
screenWidth, screenHeight = pyautogui.size()
currentMouseX, currentMouseY = pyautogui.position()
pyautogui.moveTo(100, 150)
pyautogui.click()
# move the mouse down by 10 pixels
pyautogui.moveRel(None, 10)
pyautogui.doubleClick()
# use a tweening/easing function to move the mouse over 2 seconds
pyautogui.moveTo(1800, 500, duration=2, tween=pyautogui.easeInOutQuad)
# pause 0.25 seconds between each keystroke
pyautogui.typewrite('Hello world!', interval=0.25)
pyautogui.press('esc')
pyautogui.keyDown('shift')
pyautogui.press(['left', 'left', 'left', 'left', 'left', 'left'])
pyautogui.keyUp('shift')
pyautogui.hotkey('ctrl', 'c')
distance = 200
while distance > 0:
pyautogui.dragRel(distance, 0, duration=0.5)   # drag right
distance -= 5
pyautogui.dragRel(0, distance, duration=0.5)   # drag down
pyautogui.dragRel(-distance, 0, duration=0.5)  # drag left
distance -= 5
pyautogui.dragRel(0, -distance, duration=0.5)  # drag up
import pyautogui
pyautogui.FAILSAFE = False
import pyautogui
pyautogui.PAUSE = 2.5
pyautogui.moveTo(100,100); pyautogui.click()
import pyautogui
# current mouse coordinates
pyautogui.position()
# current screen resolution (width and height)
pyautogui.size()
# is the point (x, y) on the screen?
x, y = 122, 244
pyautogui.onScreen(x, y)
import pyautogui
pyautogui.PAUSE = 2.5
import pyautogui
pyautogui.FAILSAFE = True
num_seconds = 1.2
# move the cursor to (x, y) over num_seconds seconds
pyautogui.moveTo(x, y, duration=num_seconds)
# move the cursor by xOffset horizontally and yOffset vertically,
# taking num_seconds seconds for the move
xOffset, yOffset = 50, 100
pyautogui.moveRel(xOffset, yOffset, duration=num_seconds)
pyautogui.click(x=moveToX, y=moveToY, clicks=num_of_clicks, interval=secs_between_clicks, button='left')
pyautogui.rightClick(x=moveToX, y=moveToY)
pyautogui.middleClick(x=moveToX, y=moveToY)
pyautogui.doubleClick(x=moveToX, y=moveToY)
pyautogui.tripleClick(x=moveToX, y=moveToY)
pyautogui.scroll(clicks=amount_to_scroll, x=moveToX, y=moveToY)
pyautogui.mouseDown(x=moveToX, y=moveToY, button='left')
pyautogui.mouseUp(x=moveToX, y=moveToY, button='left')
# interval between keystrokes
secs_between_keys = 0.1
pyautogui.typewrite('Hello world!\n', interval=secs_between_keys)
pyautogui.typewrite(['a', 'b', 'c', 'left', 'backspace', 'enter', 'f1'], interval=secs_between_keys)
pyautogui.KEYBOARD_KEYS[:10]
pyautogui.hotkey('ctrl', 'a') # select all
pyautogui.hotkey('ctrl', 'c') # copy
pyautogui.hotkey('ctrl', 'v') # paste
pyautogui.keyDown(key_name)
pyautogui.keyUp(key_name)
pyautogui.alert('This message box has text plus an OK button')
pyautogui.confirm('This message box has text plus OK and Cancel buttons')
pyautogui.prompt('This message box asks the user to type a string and click OK')
# returns a Pillow/PIL Image object
pyautogui.screenshot()
pyautogui.screenshot('foo.png')
# returns (left x, top y, width, height)
pyautogui.locateOnScreen('pyautogui/looks.png')
for i in pyautogui.locateAllOnScreen('pyautogui/looks.png'):
print(i)
list(pyautogui.locateAllOnScreen('pyautogui/looks.png'))
pyautogui.locateCenterOnScreen('pyautogui/looks.png')
pyautogui.size()
pyautogui.position()
# ! python 3
import pyautogui
print('Press Ctrl-C to quit')
try:
while True:
x, y = pyautogui.position()
positionStr = 'X: {} Y: {}'.format(*[str(x).rjust(4) for x in [x, y]])
print(positionStr, end='')
print('\b' * len(positionStr), end='', flush=True)
except KeyboardInterrupt:
print('\n')
# ! python
import pyautogui, sys
print('Press Ctrl-C to quit.')
try:
while True:
x, y = pyautogui.position()
positionStr = 'X: ' + str(x).rjust(4) + ' Y: ' + str(y).rjust(4)
print positionStr,
print '\b' * (len(positionStr) + 2),
sys.stdout.flush()
except KeyboardInterrupt:
print '\n'
import pyautogui
pyautogui.onScreen(0, 0)
pyautogui.onScreen(0, -1)
pyautogui.onScreen(0, 2080)
pyautogui.onScreen(1920, 1080)
pyautogui.onScreen(1919, 1079)
pyautogui.moveTo(100, 200) # move the cursor to (100, 200)
pyautogui.moveTo(None, 500) # move the cursor to (100, 500)
pyautogui.moveTo(600, None) # move the cursor to (600, 500)
pyautogui.moveTo(100, 200, duration=2) # move the cursor to (100, 200) over 2 seconds
pyautogui.moveTo(100, 200) # move the cursor to (100, 200)
pyautogui.moveRel(0, 50) # move down 50 pixels
pyautogui.moveRel(30, 0, 2) # move right 30 pixels over 2 seconds
pyautogui.moveRel(30, None) # move right 30 pixels
# hold the left mouse button and drag the mouse to (100, 200)
pyautogui.dragTo(100, 200, button='left')
# hold the left mouse button and drag the mouse to (300, 400) over 2 seconds
pyautogui.dragTo(300, 400, 2, button='left')
# hold the right mouse button and drag the mouse to (30, 0) over 2 seconds
pyautogui.dragTo(30, 0, 2, button='right')
# start slowly, then keep accelerating
pyautogui.moveTo(100, 100, 2, pyautogui.easeInQuad)
# start quickly, then keep decelerating
pyautogui.moveTo(100, 100, 2, pyautogui.easeOutQuad)
# fast at the start and end, slower in the middle
pyautogui.moveTo(100, 100, 2, pyautogui.easeInOutQuad)
# advance with a bouncing, stop-and-go motion
pyautogui.moveTo(100, 100, 2, pyautogui.easeInBounce)
# oscillate more widely, even overshooting the start and end points
pyautogui.moveTo(100, 100, 2, pyautogui.easeInElastic)
pyautogui.click()
# move to (100, 200) first, then click
pyautogui.click(x=100, y=200, duration=2)
pyautogui.click(button='right')
# double-click the left mouse button
pyautogui.click(clicks=2)
# pause 0.25 seconds between the two clicks
pyautogui.click(clicks=2, interval=0.25)
# click the right mouse button twice, 0.25 seconds apart
pyautogui.click(button='right', clicks=2, interval=0.25)
# press and then release the left mouse button
pyautogui.mouseDown(); pyautogui.mouseUp()
# press the right mouse button
pyautogui.mouseDown(button='right')
# move to (100, 200), then release the right mouse button
pyautogui.mouseUp(button='right', x=100, y=200)
# scroll up 10 notches
pyautogui.scroll(10)
# scroll down 10 notches
pyautogui.scroll(-10)
# move to (100, 100), then scroll up 10 notches
pyautogui.scroll(10, x=100, y=100)
# scroll right 10 notches
pyautogui.hscroll(10)
# scroll left 10 notches
pyautogui.hscroll(-10)
# type Hello world!
pyautogui.typewrite('Hello world!')
# type Hello world! with a 0.25 second pause after each character
pyautogui.typewrite('Hello world!', interval=0.25)
# the ENTER key
pyautogui.press('enter')
# the F1 key
pyautogui.press('f1')
# the left arrow key
pyautogui.press('left')
# hold down the `shift` key
pyautogui.keyDown('shift')
pyautogui.press('left')
pyautogui.press('left')
pyautogui.press('left')
# release the `shift` key
pyautogui.keyUp('shift')
pyautogui.press(['left', 'left', 'left'])
pyautogui.hotkey('ctrl', 'shift', 'esc')
pyautogui.keyDown('ctrl')
pyautogui.keyDown('shift')
pyautogui.keyDown('esc')
pyautogui.keyUp('esc')
pyautogui.keyUp('shift')
pyautogui.keyUp('ctrl')
print(pyautogui.KEYBOARD_KEYS)
pyautogui.alert(text='', title='', button='OK')
# message box with OK and Cancel buttons
pyautogui.confirm(text='', title='', buttons=['OK', 'Cancel'])
# message box with ten buttons labelled 0-9
pyautogui.confirm(text='', title='', buttons=range(10))
pyautogui.prompt(text='', title='' , default='')
pyautogui.password(text='', title='', default='', mask='*')
import pyautogui
im1 = pyautogui.screenshot()
im2 = pyautogui.screenshot('my_screenshot.png')
im = pyautogui.screenshot(region=(0, 0, 300 ,400))
import pyautogui
button7location = pyautogui.locateOnScreen('pyautogui/calc7key.png')
button7location
button7x, button7y = pyautogui.center(button7location)
button7x, button7y
pyautogui.click(button7x, button7y)
import pyautogui
x, y = pyautogui.locateCenterOnScreen('pyautogui/calc7key.png')
pyautogui.click(x, y)
for pos in pyautogui.locateAllOnScreen('pyautogui/calc7key.png'):
print(pos)
list(pyautogui.locateAllOnScreen('pyautogui/calc7key.png'))
import pyautogui
button7location = pyautogui.locateOnScreen('pyautogui/calc7key.png', grayscale=True)
button7location
import pyautogui
im = pyautogui.screenshot()
im.getpixel((100, 200))
pyautogui.pixel(100, 200)
pyautogui.pixelMatchesColor(100, 200, (255, 255, 255))
pyautogui.pixelMatchesColor(100, 200, (255, 255, 245))
pyautogui.pixelMatchesColor(100, 200, (255, 255, 245), tolerance=10)
pyautogui.pixelMatchesColor(100, 200, (248, 250, 245), tolerance=10)
pyautogui.pixelMatchesColor(100, 200, (205, 255, 245), tolerance=10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Introduction
Step2: PyAutoGUI can simulate mouse movement, clicks and drags, keyboard typing and key holding, and combined mouse+keyboard hotkeys; in short, almost anything your hands can do at the computer.
Step3: 1.4 Fail-Safes
Step4: Setting pyautogui.PAUSE to a float or int number of seconds adds a delay to every PyAutoGUI function call. The default delay is 0.1 seconds. This is very useful for slowing PyAutoGUI down when its functions run in a loop. For example:
Step5: All PyAutoGUI functions block until the delay has elapsed. (An optional non-blocking calling mode is planned for the future.)
Step6: 3.2 Fail-safes
Step7: When pyautogui.FAILSAFE = True, moving the mouse cursor to the top-left corner of the screen makes PyAutoGUI functions raise a pyautogui.FailSafeException.
Step8: 3.3 Mouse functions
Step9: The click() function performs a mouse click (a single left-click by default); its behaviour can be adjusted through its parameters:
Step10: The button argument can be set to left, middle or right.
Step11: The scroll function controls the mouse wheel; the amount_to_scroll argument is the number of notches to scroll. Positive values scroll the page up, negative values scroll it down:
Step12: The press and release events of each mouse button can also be handled separately:
Step13: 3.4 Keyboard functions
Step14: Multiple keys also work:
Step15: The list of key names:
Step16: Keyboard hotkeys such as Ctrl-S or Ctrl-Shift-1 can be performed with the hotkey() function:
Step17: The press and release of each key can also be called individually:
Step18: 3.5 Message box functions
Step19: In prompt(), if the user enters nothing, None is returned.
Step20: If you have an image file that you want to click on, you can use locateOnScreen() to find it.
Step21: locateAllOnScreen() finds all matching images and returns a generator:
Step22: locateCenterOnScreen() returns the X and Y coordinates of the centre of the image on the screen:
Step23: If the image is not found, None is returned.
Step24: Below is the Python 3 version of the cursor-position logging program:
Step25: The Python 2 version is:
Step26: To check whether an (X, Y) coordinate is on the screen, use onScreen(), which returns True if it is:
Step27: 5.2 Mouse movement
Step28: Normally the cursor jumps instantly to the target position; to make the mouse move more slowly, set a duration:
Step29: The default pyautogui.MINIMUM_DURATION is 0.1 seconds; any duration shorter than that is executed instantly.
Step30: 5.3 Mouse dragging
Step31: 5.4 Tween / easing functions
Step32: These easing functions are modelled on Al Sweigart's PyTweening module and can be used directly, without any extra installation.
Step33: To move before clicking, pass the target X and Y coordinates to the function:
Step34: The button argument can be set to left, middle or right. For example:
Step35: To click several times, set the clicks argument; the interval argument sets the pause between clicks. For example:
Step36: For convenience, PyAutoGUI provides doubleClick(), tripleClick() and rightClick() for double-clicks, triple-clicks and right-clicks.
Step37: 5.7 Scroll-wheel functions
Step38: On OS X and Linux, PyAutoGUI can also scroll horizontally with hscroll(). For example:
Step39: scroll() is a wrapper around vscroll() and performs vertical scrolling.
Step40: typewrite() only handles single-character keys; it cannot press keys such as SHIFT or F1.
Step41: press() is really a wrapper around keyDown() and keyUp(), simulating a key press followed by a release. The two functions can also be called on their own. For example, press the left arrow key three times while holding shift:
Step42: As with typewrite(), a list of keys can be passed to press(). For example:
Step43: 6.3 The hotkey() function
Step44: which is equivalent to:
Step45: 6.4 KEYBOARD_KEYS
Step46: 7 Message box functions
Step47: Displays a simple message box with text and an OK button. Returns the text of the button the user clicked.
Step48: Displays a simple message box with text plus OK and Cancel buttons; returns the text of the button that was clicked, and supports custom lists of numbers or strings as buttons.
Step49: A message box with a text-entry field plus OK and Cancel buttons. Clicking OK returns the typed text; clicking Cancel returns None.
Step50: Like prompt(), but for entering passwords; the input is masked with *. Has OK and Cancel buttons: OK returns the typed text, Cancel returns None.
Step51: On a $1920 \times 1080$ screen, screenshot() takes roughly 100 milliseconds, neither fast nor slow.
Step52: 8.3 Locate functions
Step53: locateCenterOnScreen() is equivalent to the first two steps above, returning the centre coordinates of the match on the screen directly:
Step54: On a $1920 \times 1080$ screen, the locate functions take 1 to 2 seconds. That is too slow for video games (LOL, DOTA), but more than enough for everyday desk work.
Step55: 8.3.1 Grayscale matching
Step56: 8.3.2 Pixel matching
Step57: You can also use PyAutoGUI's pixel() function, a wrapper around the previous calls:
Step58: If you just want to verify the pixel value at a given position, use pixelMatchesColor(), passing the X and Y coordinates and an RGB tuple:
Step59: The tolerance argument specifies the allowed deviation for each of the red, green and blue values:
|
13,950
|
<ASSISTANT_TASK:>
Python Code:
!cat /proc/cpuinfo  # shell escape: inspect the CPU this notebook is running on
# import libraries and set up the molecule geometry
from ase.units import Ry, eV, Ha
from ase.calculators.siesta import Siesta
from ase import Atoms
import numpy as np
import matplotlib.pyplot as plt
H2O = Atoms('H2O', positions = [[-0.757, 0.586, 0.000],
[0.757, 0.586, 0.000],
[0.0, 0.0, 0.0]],
cell=[20, 20, 20])
# visualization of the particle
from ase.visualize import view
view(H2O, viewer='x3d')
# enter siesta input and run siesta
siesta = Siesta(
mesh_cutoff=150 * Ry,
basis_set='DZP',
pseudo_qualifier='lda',
energy_shift=(10 * 10**-3) * eV,
fdf_arguments={
'SCFMustConverge': False,
'COOP.Write': True,
'WriteDenchar': True,
'PAO.BasisType': 'split',
'DM.Tolerance': 1e-4,
'DM.MixingWeight': 0.1,
'MaxSCFIterations': 300,
'DM.NumberPulay': 4,
'XML.Write': True})
H2O.set_calculator(siesta)
e = H2O.get_potential_energy()
# compute polarizability using pyscf-nao
siesta.pyscf_tddft(label="siesta", jcutoff=7, iter_broadening=0.15/Ha,
xc_code='LDA,PZ', tol_loc=1e-6, tol_biloc=1e-7, freq = np.arange(0.0, 15.0, 0.05))
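# the computed spectra are stored in siesta.results (e.g. "freq range",
# "polarizability nonin", "polarizability inter") and plotted below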
# plot polarizability with matplotlib
%matplotlib inline
fig = plt.figure(1)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.plot(siesta.results["freq range"], siesta.results["polarizability nonin"][:, 0, 0].imag)
ax2.plot(siesta.results["freq range"], siesta.results["polarizability inter"][:, 0, 0].imag)
ax1.set_xlabel(r"$\omega$ (eV)")
ax2.set_xlabel(r"$\omega$ (eV)")
ax1.set_ylabel(r"Im($P_{xx}$) (au)")
ax2.set_ylabel(r"Im($P_{xx}$) (au)")
ax1.set_title(r"Non interacting")
ax2.set_title(r"Interacting")
fig.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I do not have on my laptop an
Step2: We can then run the DFT calculation using Siesta
Step3: The TDDFT calculations with PySCF-NAO
|
13,951
|
<ASSISTANT_TASK:>
Python Code:
n = 50000
min_timestamp = '2000-01-01T00:00:00Z' # start of time
t1 = time.time()
query = """
SELECT
*
FROM
enwiki.article_talk_diff_no_bot_sample
WHERE
rev_timestamp > '%(min_timestamp)s'
AND ns = 'article'
LIMIT %(n)d
"""
params = {
'n': int(n * 1.7),
'min_timestamp': min_timestamp
}
df = query_hive_ssh(query % params, '../../data/raw_random_sample.tsv', priority = True, quoting=3, delete = True)
df.columns = [c.split('.')[1] for c in df.columns]
t2 = time.time()
print('Query and Download Time:', (t2-t1) / 60.0)
dfc = clean(df[300:500])
show_comments(dfc, 100)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Query
|
13,952
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('/home/pi/minecraft-programming')
import mcpi.block as block
import time
import drawings
# Task 1 program
userName="blah"
mc.postToChat(userName)
# Task 2 program
drawings.drawMyCircle(radius, blockToUse)
# Task 3 program
# Task 4 program
drawings.drawSolid(shape, length, blockToUse)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Task 1
Step2: Task 2
Step3: 3D Shapes and Polyhedrons
Step4: Task 4
|
13,953
|
<ASSISTANT_TASK:>
Python Code:
import flexcode
import numpy as np
import xgboost as xgb
from flexcode.regression_models import XGBoost, CustomModel
def generate_data(n_draws):
x = np.random.normal(0, 1, n_draws)
z = np.random.normal(x, 1, n_draws)
return x, z
x_train, z_train = generate_data(5000)
x_validation, z_validation = generate_data(5000)
x_test, z_test = generate_data(5000)
# Parameterize model
model = flexcode.FlexCodeModel(XGBoost, max_basis=31, basis_system="cosine",
regression_params={'max_depth': 3, 'learning_rate': 0.5, 'objective': 'reg:linear'})
# Fit and tune model
model.fit(x_train, z_train)
cdes_predict_xgb, z_grid = model.predict(x_test, n_grid=200)
model.__dict__
import pickle
pickle.dump(file=open('example.pkl', 'wb'), obj=model,
protocol=pickle.HIGHEST_PROTOCOL)
model = pickle.load(open('example.pkl', 'rb'))
model.__dict__
cdes_predict_xgb, z_grid = model.predict(x_test, n_grid=200)
# Parameterize model
my_model = xgb.XGBRegressor
model_c = flexcode.FlexCodeModel(CustomModel, max_basis=31, basis_system="cosine",
regression_params={'max_depth': 3, 'learning_rate': 0.5, 'objective': 'reg:linear'},
custom_model=my_model)
# Fit and tune model
model_c.fit(x_train, z_train)
cdes_predict_custom, z_grid = model_c.predict(x_test, n_grid=200)
np.max(np.abs(cdes_predict_custom - cdes_predict_xgb))
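# Sanity check (a sketch): with identical regression_params the two estimates should
# agree up to numerical noise, so the following is expected to print True.
print(np.allclose(cdes_predict_custom, cdes_predict_xgb))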
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Creation
Step2: FlexZBoost
Step3: Custom Model
Step4: The two conditional density estimates should be the same across the board. <br>
|
13,954
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
from fastai.imports import *
from fastai.structured import *
from pandas_summary import DataFrameSummary
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from IPython.display import display
from sklearn import metrics
PATH = "data/bulldozers/"
!ls {PATH}
df_raw = pd.read_csv(f'{PATH}Train.csv', low_memory=False,
parse_dates=["saledate"])
def display_all(df):
with pd.option_context("display.max_rows", 1000):
with pd.option_context("display.max_columns", 1000):
display(df)
display_all(df_raw.tail().transpose())
display_all(df_raw.describe(include='all').transpose())
df_raw.SalePrice = np.log(df_raw.SalePrice)
m = RandomForestRegressor(n_jobs=-1)
m.fit(df_raw.drop('SalePrice', axis=1), df_raw.SalePrice)
add_datepart(df_raw, 'saledate')
df_raw.saleYear.head()
df_raw.columns
train_cats(df_raw)
df_raw.UsageBand
df_raw.UsageBand.cat.categories
# we can do .cat.codes to get the actual numbers
df_raw.UsageBand.cat.codes
df_raw.UsageBand.cat.set_categories(['High', 'Medium', 'Low'], ordered=True, inplace=True)
df_raw.UsageBand = df_raw.UsageBand.cat.codes
display_all(df_raw.isnull().sum().sort_index()/len(df_raw))
os.makedirs('tmp', exist_ok=True)
df_raw.to_feather('tmp/bulldozers-raw.feather')
# df_raw = pd.read_feather('tmp/bulldozers-raw.feather')
df, y, nas = proc_df(df_raw, 'SalePrice')
??numericalize
df.columns
m = RandomForestRegressor(n_jobs=-1)
m.fit(df, y)
m.score(df, y)
def split_vals(a, n): return a[:n].copy(), a[n:].copy()
n_valid = 12000 # same as Kaggle's test set size
n_trn = len(df) - n_valid
raw_train, raw_valid = split_vals(df_raw, n_trn)
X_train, X_valid = split_vals(df, n_trn)
y_train, y_valid = split_vals(y, n_trn)
X_train.shape, y_train.shape, X_valid.shape
def rmse(x,y): return math.sqrt(((x-y)**2).mean())
def print_score(m):
res = [rmse(m.predict(X_train), y_train), rmse(m.predict(X_valid), y_valid),
m.score(X_train, y_train), m.score(X_valid, y_valid)]
if hasattr(m, 'oob_score_'): res.append(m.oob_score_)
print(res)
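# print_score output order: [train RMSE, valid RMSE, train R^2, valid R^2, (OOB R^2 when available)]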
m = RandomForestRegressor(n_jobs=-1)
%time m.fit(X_train, y_train)
print_score(m)
df_trn, y_trn, nas = proc_df(df_raw, 'SalePrice', subset=30000, na_dict=nas)
X_train, _ = split_vals(df_trn, 20000)
y_train, _ = split_vals(y_trn, 20000)
m = RandomForestRegressor(n_jobs=-1) # n_jobs=-1: set to num. cores on CPU
%time m.fit(X_train, y_train)
print_score(m)
m = RandomForestRegressor(n_estimators=1, max_depth=3, bootstrap=False, n_jobs=-1)
m.fit(X_train, y_train)
print_score(m)
draw_tree(m.estimators_[0], df_trn, precision=3)
df_raw.fiProductClassDesc.cat.categories
# df_raw.fiProductClassDesc.cat.codes
m = RandomForestRegressor(n_estimators=1, bootstrap=False, n_jobs=-1)
m.fit(X_train, y_train)
print_score(m)
m = RandomForestRegressor(n_jobs=-1)
m.fit(X_train, y_train)
print_score(m)
preds = np.stack([t.predict(X_valid) for t in m.estimators_])
preds[:,0], np.mean(preds[:,0]), y_valid[0]
preds.shape
plt.plot([metrics.r2_score(y_valid, np.mean(preds[:i+1], axis=0)) for i in range(10)]);
m = RandomForestRegressor(n_estimators=20, n_jobs=-1)
m.fit(X_train, y_train)
print_score(m)
m = RandomForestRegressor(n_estimators=40, n_jobs=-1)
m.fit(X_train, y_train)
print_score(m)
m = RandomForestRegressor(n_estimators=80, n_jobs=-1)
%time m.fit(X_train, y_train)
print_score(m)
m = RandomForestRegressor(n_estimators=160, n_jobs=-1)
%time m.fit(X_train, y_train)
print_score(m)
m = RandomForestRegressor(n_estimators=160, n_jobs=-1)
%time m.fit(X_train, y_train)
print_score(m)
m = RandomForestRegressor(n_estimators=40, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
print_score(m)
n_trn
df_trn, y_trn, nas = proc_df(df_raw, 'SalePrice')
X_train, X_valid = split_vals(df_trn, n_trn)
y_train, y_valid = split_vals(y_trn, n_trn)
len(df_trn), len(X_train)
set_rf_samples(20000)
m = RandomForestRegressor(n_jobs=-1, oob_score=True)
%time m.fit(X_train, y_train)
print_score(m)
m = RandomForestRegressor(n_estimators=40, n_jobs=-1, oob_score=True)
%time m.fit(X_train, y_train)
print_score(m)
reset_rf_samples()
m = RandomForestRegressor(n_estimators=40, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
print_score(m)
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, n_jobs=-1, oob_score=True)
%time m.fit(X_train, y_train)
print_score(m)
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
print_score(m)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Data
Step2: In any sort of analytics work, it's important to look at your data, to make sure you understand the format, how it's stored, what type of values it holds, etc. Even if you've read descriptions about your data, the actual data may not be what you expect.
Step3: Lecture 2 00
Step4: 2.2.2 Initial Processing
Step5: From the error above, we see we need all our columns to be numbers.
Step6: We can see those new date columns below where 'saledate' used to be
Step7: The new date columns are numbers, but
Step8: At first glance it doesn't look like anything's changed, but if you take a deeper look, you'll see the data type has changed to 'category'. 'category' is a Pandas class, with attributes accesible via .cat.xxxx
Step9: To actually use this dataset and turn it into numbers, what we need to do is to take every categorical column and replace it with .cat.codes
Step10: 2.2.3 Pre-processing
Step11: The R^2 score shown below measures how much of the variance in the data the model explains.
Step12: A validation set helps handle the issue of overfitting. Make it such that it shares the test set's properties, i.e.
Step13: Lecture 2 00
Step14: Here we see our model, which had 0.982 R2 on the training set, got only 0.887 on the validation set, which makes us think it's overfitting quite badly. However it's not too badly because the RMSE on the logs of the prices (0.25) would've put us in the top 25% of the competition anyway (100/407).
Step15: Instead of 83 seconds of total compute time (15.2s thanks to multi-cores), we now run in only 2.94 total seconds of compute.
Step16: After fitting the model and printing the score, the R2 score has dropped from 0.77 to 0.39. This is not a good model. It's better than the Mean-model (being > 0) but still not good.
Step17: A tree is a series of binary decisions, or splits. Our tree first of all decided to split on Coupler_System ≤ 0.5. That's actually a boolean variable, True/False. Within the group where it was True, it further split those into YearMade ≤ 1988 (1987.5), and on, etc.
Step18: If we don't limit depth, the training R^2 is of of course, a perfect 1.0. Because we can exactly predict every training element because it's in a leaf-node all it's own.
Step19: We'll grab the predictions for each individual tree, and look at one example.
Step20: We see a shape of 10 different sets of predictions and for each one our validation set of size 12,000 -- so 12,000 predictions for each of the 10 trees
Step21: Above, preds[
Step22: Note that the final value on the plot is the same as the final R^2 score returned by the RandomForest -- about 0.7748 here.
Step23: At this point, it looks like we're inside signal noise. More trees is never going to make the model worse - but a lower score is easily explained as whatever diminished accuracy gain being overwhelmed by noise in the random-sampling of the data.
Step24: Well there you have it. And the highest score so far to boot.
Step25: The extra value at the end is the R2 for the oob score. We want it to be very close to the R2 for the validation set (2nd to last value) although that doesn't seem to be the case here.
Step26: The basic idea is this
Step27: We don't see that much of an improvement over the R2 with the 20k data-subset, because we haven't used many estimators yet.
Step28: With more estimators the model can see a larger portion of the data, and the R2 (2nd last value) has gone up from 0.8591 to 0.8755.
Step29: Let's get a baseline for this full set to compare to. This'll train 40 estimators all the way down until the leaf nodes have just one sample in them.
Step30: This gets us a 0.899 R2 on the validation set, or a 0.908 on the OOB.
Step31: Setting min_samples_leaf = 3 stops the RF when each leaf-node has 3 or fewer samples in it. In practice this means 1 or 2 fewer levels of decisions being made - which means around half the number of decision criteria, so it'll train much quicker. It also means when we look at an individual tree, instead of taking one point, wer're taking the average of 3 points - so we'd expect the trees to generalize better, though each tree is likely to be less powerful than before.
|
13,955
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
probabilit = [0.333, 0.334, 0.333]
lista_elegir = [(3, 3), (3, 4), (3, 5)]
samples = 1000
np.random.seed(42)
temp = np.array(lista_elegir)
result = temp[np.random.choice(len(lista_elegir),samples,p=probabilit)]
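# result is a (1000, 2) integer array: each row is one of the tuples from lista_elegir,
# drawn independently with the probabilities given in probabilit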
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
13,956
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import display
from IPython.core.display import HTML
import warnings
warnings.filterwarnings('ignore')
import os
if os.getcwd().split('/')[-1] == 'notebooks':
os.chdir('../')
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pandas_profiling
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
# written by Gilles Louppe and distributed under the BSD 3 clause
from src.vn_datasci.blagging import BlaggingClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import fbeta_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import classification_report
# self-authored library that to facilatate ML classification and evaluation
from src.vn_datasci.skhelper import LearningModel, eval_db
from IPython.display import display
from IPython.core.display import HTML
import warnings
warnings.filterwarnings('ignore')
import os
if os.getcwd().split('/')[-1] == 'notebooks':
os.chdir('../')
%matplotlib inline
#%config figure_format='retina'
plt.rcParams.update({'figure.figsize': (10, 7)})
sns.set_context("notebook", font_scale=1.75, rc={"lines.linewidth": 1.25})
sns.set_style("darkgrid")
sns.set_palette("deep")
pd.options.display.width = 80
pd.options.display.max_columns = 50
pd.options.display.max_rows = 50
def load_dataset(path='data/raw/lc_historical.csv'):
lc = pd.read_csv(path, index_col='id', memory_map=True, low_memory=False)
lc.loan_status = pd.Categorical(lc.loan_status, categories=['Fully Paid', 'Charged Off'])
return lc
dataset = load_dataset()
def calc_incomplete_stats(dataset):
warnings.filterwarnings("ignore", 'This pattern has match groups')
missing_data = pd.DataFrame(index=dataset.columns)
missing_data['Null'] = dataset.isnull().sum()
missing_data['NA_or_Missing'] = (
dataset.apply(lambda col: (
col.str.contains('(^$|n/a|^na$|^%$)', case=False).sum()))
.fillna(0).astype(int))
missing_data['Incomplete'] = (
(missing_data.Null + missing_data.NA_or_Missing) / len(dataset))
incomplete_stats = ((missing_data[(missing_data > 0).any(axis=1)])
.sort_values('Incomplete', ascending=False))
return incomplete_stats
def display_incomplete_stats(incomplete_stats):
stats = incomplete_stats.copy()
df_incomplete = (
stats.style
.set_caption('Missing')
.background_gradient(cmap=sns.light_palette("orange", as_cmap=True),
low=0, high=1, subset=['Null', 'NA_or_Missing'])
.background_gradient(cmap=sns.light_palette("red", as_cmap=True),
low=0, high=.6, subset=['Incomplete'])
.format({'Null': '{:,}', 'NA_or_Missing': '{:,}', 'Incomplete': '{:.1%}'}))
display(df_incomplete)
def plot_incomplete_stats(incomplete_stats, ylim_range=(0, 100)):
stats = incomplete_stats.copy()
stats.Incomplete = stats.Incomplete * 100
_ = sns.barplot(x=stats.index.tolist(), y=stats.Incomplete.tolist())
for item in _.get_xticklabels():
item.set_rotation(45)
_.set(xlabel='Feature', ylabel='Incomplete (%)',
title='Features with Missing or Null Values',
ylim=ylim_range)
plt.show()
def incomplete_data_report(dataset, display_stats=True, plot=True):
incomplete_stats = calc_incomplete_stats(dataset)
if display_stats:
display_incomplete_stats(incomplete_stats)
if plot:
plot_incomplete_stats(incomplete_stats)
incomplete_stats = load_dataset().pipe(calc_incomplete_stats)
display(incomplete_stats)
plot_incomplete_stats(incomplete_stats)
def clean_data(lc):
lc = lc.copy().dropna(axis=1, thresh=1)
dt_features = ['earliest_cr_line', 'issue_d']
lc[dt_features] = lc[dt_features].apply(
lambda col: pd.to_datetime(col, format='%Y-%m-%d'), axis=0)
cat_features =['purpose', 'home_ownership', 'addr_state']
lc[cat_features] = lc[cat_features].apply(pd.Categorical, axis=0)
lc.revol_util = (lc.revol_util
.str.extract('(\d+\.?\d?)', expand=False)
.astype('float'))
lc.emp_length = (lc.emp_length
.str.extract('(< 1|10\+|\d+)', expand=False)
.replace('< 1', '0.5')
.replace('10+', '10.5')
.fillna('-1.5')
.astype('float'))
return lc
dataset = load_dataset().pipe(clean_data)
def add_features(lc):
# ratio of loan amount to annual income
group_labels = ['low', 'avg', 'high']
lc['loan_amnt_to_inc'] = (
pd.cut((lc.loan_amnt / lc.annual_inc), 3, labels=['low', 'avg', 'high'])
.cat.set_categories(['low', 'avg', 'high'], ordered=True))
# age of first credit line from when the loan was issued
lc['earliest_cr_line_age'] = (lc.issue_d - lc.earliest_cr_line).astype(int)
# the ratio of avg current balance to annual income
lc['avg_cur_bal_to_inc'] = lc.avg_cur_bal / lc.annual_inc
# the ratio of avg current balance to loan amount
lc['avg_cur_bal_to_loan_amnt'] = lc.avg_cur_bal / lc.loan_amnt
# grouping level of accounts opened in the last 2 yrs
lc['acc_open_past_24mths_groups'] = (
pd.qcut(lc.acc_open_past_24mths, 3, labels=['low', 'avg', 'high'])
.cat.add_categories(['unknown']).fillna('unknown')
.cat.set_categories(['low', 'avg', 'high', 'unknown'], ordered=True))
return lc
dataset = load_dataset().pipe(clean_data).pipe(add_features)
def drop_features(lc):
target_leaks = ['recoveries', 'issue_d']
other_features = ['earliest_cr_line', 'acc_open_past_24mths', 'addr_state']
to_drop = target_leaks + other_features
return lc.drop(to_drop, axis=1)
dataset = load_dataset().pipe(clean_data).pipe(add_features).pipe(drop_features)
def load_and_preprocess_data():
return (load_dataset()
.pipe(clean_data)
.pipe(add_features)
.pipe(drop_features))
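# the .pipe chain applies cleaning, feature engineering and column drops in sequence,
# giving a single entry point for the fully prepared DataFrame used in the cells below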
def plot_factor_pct(dataset, feature):
if feature not in dataset.columns:
return
y = dataset[feature]
factor_counts = y.value_counts()
x_vals = factor_counts.index.tolist()
y_vals = ((factor_counts.values/factor_counts.values.sum())*100).round(2)
sns.barplot(y=x_vals, x=y_vals);
def plot_pct_charged_off(lc, feature):
lc_counts = lc[feature].value_counts()
charged_off = lc[lc.loan_status=='Charged Off']
charged_off_counts = charged_off[feature].value_counts()
charged_off_ratio = ((charged_off_counts / lc_counts * 100)
.round(2).sort_values(ascending=False))
x_vals = charged_off_ratio.index.tolist()
y_vals = charged_off_ratio
sns.barplot(y=x_vals, x=y_vals);
processed_dataset = load_and_preprocess_data()
incomplete_stats = calc_incomplete_stats(processed_dataset)
display(incomplete_stats)
plot_incomplete_stats(incomplete_stats)
processed_dataset.pipe(plot_factor_pct, 'loan_status')
HTML(processed_dataset.pipe(pandas_profiling.ProfileReport).html)
def to_xy(dataset):
y = dataset.pop('loan_status').cat.codes
X = pd.get_dummies(dataset, drop_first=True)
return X, y
X, y = load_and_preprocess_data().pipe(to_xy)
split_data = train_test_split(X, y, test_size=0.20, stratify=y, random_state=11)
X_train, X_test, y_train, y_test = split_data
train_test_sets = dict(
zip(['X_train', 'X_test', 'y_train', 'y_test'], [*split_data]))
(pd.DataFrame(
data={'Observations (#)': [X_train.shape[0], X_test.shape[0]],
'Percent (%)': ['80%', '20%'],
'Features (#)': [X_train.shape[1], X_test.shape[1]]},
index=['Training', 'Test'])
[['Percent (%)', 'Features (#)', 'Observations (#)']])
dummy_model = LearningModel(
'Naive Predictor - Baseline', Pipeline([
('imp', Imputer(strategy='median')),
('clf', DummyClassifier(strategy='constant', constant=0))]))
dummy_model.fit_and_predict(**train_test_sets)
model_evals = eval_db(dummy_model.eval_report)
tree_model = LearningModel(
'Decision Tree Classifier', Pipeline([
('imp', Imputer(strategy='median')),
('clf', DecisionTreeClassifier(class_weight='balanced', random_state=11))]))
tree_model.fit_and_predict(**train_test_sets)
tree_model.display_evaluation()
model_evals = eval_db(model_evals, tree_model.eval_report)
rf_model = LearningModel(
'Random Forest Classifier', Pipeline([
('imp', Imputer(strategy='median')),
('clf', RandomForestClassifier(
class_weight='balanced_subsample', random_state=11))]))
rf_model.fit_and_predict(**train_test_sets)
rf_model.display_evaluation()
model_evals = eval_db(model_evals, rf_model.eval_report)
blagging_pipeline = Pipeline([
('imp', Imputer(strategy='median')),
('clf', BlaggingClassifier(
random_state=11, n_jobs=-1,
base_estimator=RandomForestClassifier(
class_weight='balanced_subsample', random_state=11)))])
blagging_model = LearningModel('Blagging Classifier (RF)', blagging_pipeline)
blagging_model.fit_and_predict(**train_test_sets)
blagging_model.display_evaluation()
model_evals = eval_db(model_evals, blagging_model.eval_report)
blagging_clf = BlaggingClassifier(
random_state=11, n_jobs=-1,
base_estimator=ExtraTreesClassifier(
criterion='entropy', class_weight='balanced_subsample',
max_features=None, n_estimators=60, random_state=11))
blagging_model = LearningModel(
'Blagging Classifier (Extra Trees)', Pipeline([
('imp', Imputer(strategy='median')),
('clf', blagging_clf)]))
blagging_model.fit_and_predict(**train_test_sets)
blagging_model.display_evaluation()
model_evals = eval_db(model_evals, blagging_model.eval_report)
rf_top_features = LearningModel('Random Forest Classifier',
Pipeline([('imp', Imputer(strategy='median')),
('clf', RandomForestClassifier(max_features=None,
class_weight='balanced_subsample', random_state=11))]))
rf_top_features.fit_and_predict(**train_test_sets)
rf_top_features.display_top_features(top_n=15)
rf_top_features.plot_top_features(top_n=10)
display(model_evals)
blagging_model = LearningModel('Blagging Classifier (Extra Trees)',
Pipeline([('imp', Imputer(strategy='median')),
('clf', BlaggingClassifier(
base_estimator=ExtraTreesClassifier(
criterion='entropy', class_weight='balanced_subsample',
max_features=None, n_estimators=60, random_state=11),
random_state=11, n_jobs=-1))]))
blagging_model.fit_and_predict(**train_test_sets)
(pd.DataFrame(data={'Benchmark Predictor': [0.7899, 0.1603, 0.5203],
'Unoptimized Model': [0.7499, 0.2602, 0.6463],
'Optimized Model': ['', '', '']},
index=['Accuracy Score', 'F1-score', 'AUC'])
[['Benchmark Predictor', 'Unoptimized Model', 'Optimized Model']])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Notebook Config
Step2: Data Preprocessing
Step3: Exploration
Step4: Data Munging
Step5: Feature Engineering
Step6: Drop Features
Step7: Load & Prepare Function
Step8: Exploratory Data Analysis (EDA)
Step9: Overview
Step10: Factor Analysis
Step11: Summary Statistics
Step12: Predictive Modeling
Step13: Initializing Train/Test Sets
Step14: Classification Models
Step15: Decision Tree Classifier
Step16: Random Forest Classifier
Step17: Blagging Classifier
Step18: Base Estimator -> ExtraTrees
Step19: Evaluating Model Performance
Step20: Model Selection
Step21: Optimal Model
Step22: Optimizing Hyperparameters
|
13,957
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import networkx as nx
Gu = nx.Graph()
for i, j in [(1, 2), (1, 4), (4, 2), (4, 3)]:
Gu.add_edge(i,j)
nx.draw(Gu, with_labels = True)
import networkx as nx
Gd = nx.DiGraph()
for i, j in [(1, 2), (1, 4), (4, 2), (4, 3)]:
Gd.add_edge(i,j)
nx.draw(Gd, with_labels = True)
nx.draw(Gu, with_labels = True)
nx.draw(Gd, with_labels = True)
import numpy as np
x = [1, 1, 1, 2, 2, 3]
np.mean(x), np.sum(x), np.std(x)
plt.hist(x)
plt.show()
from collections import defaultdict, Counter
freq = defaultdict(int)
for i in x:
freq[i] +=1
freq
freq_sum = np.sum(freq.values())
freq_sum
px = [float(i)/freq_sum for i in freq.values()]
px
plt.plot(freq.keys(), px, 'r-o')
plt.show()
plt.figure(1)
plt.subplot(121)
pos = nx.spring_layout(Gu) # define a layout; here we use the spring layout
nx.draw(Gu, pos, with_labels = True)
plt.subplot(122)
nx.draw(Gd, pos, with_labels = True)
G1 = nx.complete_graph(4)
pos = nx.spring_layout(G1) # define a layout; here we use the spring layout
nx.draw(G1, pos = pos, with_labels = True)
print(nx.transitivity(G1))
G2 = nx.Graph()
for i, j in [(1, 2), (1, 3), (1, 0), (3, 0)]:
G2.add_edge(i,j)
nx.draw(G2,pos = pos, with_labels = True)
print(nx.transitivity(G2))
G3 = nx.Graph()
for i, j in [(1, 2), (1, 3), (1, 0)]:
G3.add_edge(i,j)
nx.draw(G3, pos =pos, with_labels = True)
print(nx.transitivity(G3))
import networkx as nx
import matplotlib.pyplot as plt
BA= nx.random_graphs.barabasi_albert_graph(500,1) # generate a BA scale-free network with n=500, m=1
pos = nx.spring_layout(BA) # define a layout; here we use the spring layout
nx.draw(BA,pos,with_labels=False,node_size = 30) # draw the graph
plt.show()
nx.degree_histogram(BA)[:3]
BA.degree().items()[:3]
plt.hist(BA.degree().values())
plt.show()
def plotDegreeDistributionLongTail(G):
degs = defaultdict(int)
for i in G.degree().values(): degs[i]+=1
items = sorted ( degs.items () )
x, y = np.array(items).T
plt.plot(x, y, 'b-o')
plt.legend(['Degree'])
plt.xlabel('$K$', fontsize = 20)
plt.ylabel('$P_K$', fontsize = 20)
plt.title('$Degree\,Distribution$', fontsize = 20)
plt.show()
plotDegreeDistributionLongTail(BA)
def plotDegreeDistribution(G):
degs = defaultdict(int)
for i in G.degree().values(): degs[i]+=1
items = sorted ( degs.items () )
x, y = np.array(items).T
plt.plot(x, y, 'b-o')
plt.xscale('log')
plt.yscale('log')
plt.legend(['Degree'])
plt.xlabel('$K$', fontsize = 20)
plt.ylabel('$P_K$', fontsize = 20)
plt.title('$Degree\,Distribution$', fontsize = 20)
plt.show()
plotDegreeDistribution(BA)
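# on log-log axes the BA degree distribution falls roughly on a straight line,
# the signature of a power-law (scale-free) network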
BA= nx.random_graphs.barabasi_albert_graph(50000,10) # generate a BA scale-free network with n=50000, m=10
plotDegreeDistribution(BA)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Directed
Step2: <img src = './img/networks.png' width = 1000>
Step3: Undirected network
Step4: Directed network
Step5: For a sample of N values
Step6: Average Degree
Step7: Undirected
Step8: THREE CENTRAL QUANTITIES IN NETWORK SCIENCE
|
13,958
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.integrate import odeint
from IPython.html.widgets import interact, fixed
g = 9.81 # m/s^2
l = 0.5 # length of pendulum, in meters
tmax = 50. # seconds
t = np.linspace(0, tmax, int(100*tmax))
def derivs(y, t, a, b, omega0):
"""Compute the derivatives of the damped, driven pendulum.
Parameters
----------
y : ndarray
The solution vector at the current time t[i]: [theta[i],omega[i]].
t : float
The current time t[i].
a, b, omega0: float
The parameters in the differential equation.
Returns
-------
dy : ndarray
The vector of derivatives at t[i]: [dtheta[i],domega[i]].
"""
theta = y[0]
omega = y[1]
dtheta = omega
domega = (-g/l)*np.sin(theta) - a*omega - b*np.sin(omega0*t)
dy = np.array([dtheta,domega])
return dy
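# The function above encodes the damped, driven pendulum as a first-order system:
#   dtheta/dt = omega
#   domega/dt = -(g/l) sin(theta) - a*omega - b*sin(omega0*t)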
derivs(np.array([np.pi,1.0]), 0, 1.0, 1.0, 1.0)
assert np.allclose(derivs(np.array([np.pi,1.0]), 0, 1.0, 1.0, 1.0), [1.,-1.])
def energy(y):
"""Compute the energy for the state array y.
The state array y can have two forms:
1. It could be an ndim=1 array of np.array([theta,omega]) at a single time.
2. It could be an ndim=2 array where each row is the [theta,omega] at single
time.
Parameters
----------
y : ndarray, list, tuple
A solution vector
Returns
-------
E/m : float (ndim=1) or ndarray (ndim=2)
The energy per mass.
"""
if np.ndim(y)==1:
theta = y[0]
omega = y[1]
elif np.ndim(y)==2:
theta = y[:,0]
omega = y[:,1]
Em = g*l*(1 - np.cos(theta)) + 0.5*(l**2)*(omega**2)
return Em
energy(np.ones((10,2)))
assert np.allclose(energy(np.array([np.pi,0])),g)
assert np.allclose(energy(np.ones((10,2))), np.ones(10)*energy(np.array([1,1])))
a = 0
b = 0
omega0 = 0
ic = np.array([np.pi,0.0])
soln = odeint(derivs, ic, t, args=(a, b, omega0), atol=1e-5, rtol=1e-4)
theta = soln[:,0]
omega = soln[:,1]
Em = energy(soln)
theta, omega, Em
plt.figure(figsize=(5,3))
plt.plot(t, Em)
plt.ylim(0,12)
plt.xlabel('Time')
plt.ylabel('Energy/Mass')
plt.title('Energy/Mass vs. Time');
plt.figure(figsize=(5,3))
plt.plot(t, omega)
plt.ylim(-np.pi,np.pi)
plt.yticks([-np.pi,0,np.pi],['$-\pi$',0,'$\pi$'])
plt.xlabel('Time')
plt.ylabel('Angular Velocity')
plt.title('Angular Velocity vs. Time');
plt.figure(figsize=(5,3))
plt.plot(t, theta)
plt.ylim(0,2*np.pi)
plt.yticks([0,np.pi,2*np.pi],[0,'$\pi$','2$\pi$'])
plt.xlabel('Time')
plt.ylabel('Angular Position')
plt.title('Angular Position vs. Time');
assert True # leave this to grade the two plots and their tuning of atol, rtol.
def plot_pendulum(a=0.0, b=0.0, omega0=0.0):
"""Integrate the damped, driven pendulum and make a phase plot of the solution."""
ic = np.array([-np.pi+0.1,0.0])
soln = odeint(derivs, ic, t, args=(a, b, omega0), atol=1e-9, rtol=1e-8)
theta = soln[:,0]
omega = soln[:,1]
plt.figure(figsize=(8, 5))
plt.plot(theta,omega)
plt.xlabel('Angular Position')
plt.ylabel('Angular Velocity')
plt.xlim(-2*np.pi,2*np.pi)
plt.ylim(-10,10)
plt.title('Angular Velocity vs. Angular Position')
plot_pendulum(0.5, 0.0, 0.0)
interact(plot_pendulum, a=(0.0,1.0,0.1), b=(0.0,10.0,0.1), omega0=(0.0,10.0,0.1));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Damped, driven nonlinear pendulum
Step4: Write a function derivs for usage with scipy.integrate.odeint that computes the derivatives for the damped, driven pendulum. The solution vector at each time will be $\vec{y}(t) = (\theta(t),\omega(t))$.
Step5: Simple pendulum
Step7: Damped pendulum
Step8: Here is an example of the output of your plot_pendulum function that should show a decaying spiral.
Step9: Use interact to explore the plot_pendulum function with
|
13,959
|
<ASSISTANT_TASK:>
Python Code:
help('learning_lab.03_interface_names')
from importlib import import_module
script = import_module('learning_lab.03_interface_names')
from inspect import getsource
print(getsource(script.main))
print(getsource(script.demonstrate))
run ../learning_lab/03_interface_names.py
from basics.odl_http import http_history
from basics.http import http_history_to_html
from IPython.core.display import HTML
HTML(http_history_to_html(http_history()))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Implementation
Step2: Execution
Step3: HTTP
|
13,960
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mri', 'sandbox-3', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
13,961
|
<ASSISTANT_TASK:>
Python Code:
!curl -Lo conda_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py
import conda_installer
conda_installer.install()
!/root/miniconda/bin/conda info -e
!pip install --pre deepchem
import deepchem as dc
tasks, datasets, transformers = dc.molnet.load_tox21(featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = datasets
n_tasks = len(tasks)
model = dc.models.GraphConvModel(n_tasks, mode='classification')
model.fit(train_dataset, nb_epoch=50)
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
print('Training set score:', model.evaluate(train_dataset, [metric], transformers))
print('Test set score:', model.evaluate(test_dataset, [metric], transformers))
from deepchem.models.layers import GraphConv, GraphPool, GraphGather
import tensorflow as tf
import tensorflow.keras.layers as layers
batch_size = 100
class MyGraphConvModel(tf.keras.Model):
def __init__(self):
super(MyGraphConvModel, self).__init__()
self.gc1 = GraphConv(128, activation_fn=tf.nn.tanh)
self.batch_norm1 = layers.BatchNormalization()
self.gp1 = GraphPool()
self.gc2 = GraphConv(128, activation_fn=tf.nn.tanh)
self.batch_norm2 = layers.BatchNormalization()
self.gp2 = GraphPool()
self.dense1 = layers.Dense(256, activation=tf.nn.tanh)
self.batch_norm3 = layers.BatchNormalization()
self.readout = GraphGather(batch_size=batch_size, activation_fn=tf.nn.tanh)
self.dense2 = layers.Dense(n_tasks*2)
self.logits = layers.Reshape((n_tasks, 2))
self.softmax = layers.Softmax()
def call(self, inputs):
gc1_output = self.gc1(inputs)
batch_norm1_output = self.batch_norm1(gc1_output)
gp1_output = self.gp1([batch_norm1_output] + inputs[1:])
gc2_output = self.gc2([gp1_output] + inputs[1:])
        batch_norm2_output = self.batch_norm2(gc2_output)
gp2_output = self.gp2([batch_norm2_output] + inputs[1:])
dense1_output = self.dense1(gp2_output)
batch_norm3_output = self.batch_norm3(dense1_output)
readout_output = self.readout([batch_norm3_output] + inputs[1:])
logits_output = self.logits(self.dense2(readout_output))
return self.softmax(logits_output)
model = dc.models.KerasModel(MyGraphConvModel(), loss=dc.models.losses.CategoricalCrossEntropy())
test_dataset.X[0]
from deepchem.metrics import to_one_hot
from deepchem.feat.mol_graphs import ConvMol
import numpy as np
def data_generator(dataset, epochs=1):
for ind, (X_b, y_b, w_b, ids_b) in enumerate(dataset.iterbatches(batch_size, epochs,
deterministic=False, pad_batches=True)):
multiConvMol = ConvMol.agglomerate_mols(X_b)
inputs = [multiConvMol.get_atom_features(), multiConvMol.deg_slice, np.array(multiConvMol.membership)]
for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
inputs.append(multiConvMol.get_deg_adjacency_lists()[i])
labels = [to_one_hot(y_b.flatten(), 2).reshape(-1, n_tasks, 2)]
weights = [w_b]
yield (inputs, labels, weights)
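# Each yielded batch is (inputs, labels, weights); inputs packs the atom features, degree slice,
# membership array and per-degree adjacency lists that GraphConv/GraphPool/GraphGather expect.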
model.fit_generator(data_generator(train_dataset, epochs=50))
print('Training set score:', model.evaluate_generator(data_generator(train_dataset), [metric], transformers))
print('Test set score:', model.evaluate_generator(data_generator(test_dataset), [metric], transformers))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What are Graph Convolutions?
Step2: Let's now train a graph convolutional network on this dataset. DeepChem has the class GraphConvModel that wraps a standard graph convolutional architecture under the hood for user convenience. Let's instantiate an object of this class and train it on our dataset.
Step3: Let's try to evaluate the performance of the model we've trained. For this, we need to define a metric, a measure of model performance. dc.metrics holds a collection of metrics already. For this dataset, it is standard to use the ROC-AUC score, the area under the receiver operating characteristic curve (which measures the tradeoff between the true positive rate and the false positive rate). Luckily, the ROC-AUC score is already available in DeepChem.
Step4: The results are pretty good, and GraphConvModel is very easy to use. But what's going on under the hood? Could we build GraphConvModel ourselves? Of course! DeepChem provides Keras layers for all the calculations involved in a graph convolution. We are going to apply the following layers from DeepChem.
Step5: We can now see more clearly what is happening. There are two convolutional blocks, each consisting of a GraphConv, followed by batch normalization, followed by a GraphPool to do max pooling. We finish up with a dense layer, another batch normalization, a GraphGather to combine the data from all the different nodes, and a final dense layer to produce the global output.
Step6: What are the inputs to this model? A graph convolution requires a complete description of each molecule, including the list of nodes (atoms) and a description of which ones are bonded to each other. In fact, if we inspect the dataset we see that the feature array contains Python objects of type ConvMol.
Step7: Models expect arrays of numbers as their inputs, not Python objects. We must convert the ConvMol objects into the particular set of arrays expected by the GraphConv, GraphPool, and GraphGather layers. Fortunately, the ConvMol class includes the code to do this, as well as to combine all the molecules in a batch to create a single set of arrays.
Step8: Now, we can train the model using fit_generator(generator) which will use the generator we've defined to train the model.
Step9: Now that we have trained our graph convolutional method, let's evaluate its performance. We again have to use our defined generator to evaluate model performance.
|
13,962
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
from mne.report import Report
from mne.datasets import sample
from mne import read_evokeds
from matplotlib import pyplot as plt
data_path = sample.data_path()
meg_path = data_path + '/MEG/sample'
subjects_dir = data_path + '/subjects'
evoked_fname = meg_path + '/sample_audvis-ave.fif'
report = Report(image_format='png', subjects_dir=subjects_dir,
info_fname=evoked_fname, subject='sample',
raw_psd=False) # use False for speed here
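# parse_folder scans meg_path for supported files (raw, evoked, covariance, trans, MRI) and adds them to the report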
report.parse_folder(meg_path, on_error='ignore', mri_decim=10)
# Load the evoked data
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(None, 0), verbose=False)
evoked.crop(0, .2)
times = evoked.times[::4]
# Create a list of figs for the slider
figs = list()
for t in times:
figs.append(evoked.plot_topomap(t, vmin=-300, vmax=300, res=100,
show=False))
plt.close(figs[-1])
report.add_slider_to_section(figs, times, 'Evoked Response',
image_format='png') # can also use 'svg'
# to save report
report.save('my_report.html', overwrite=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Do standard folder parsing (this can take a couple of minutes)
Step2: Add a custom section with an evoked slider
|
13,963
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cams', 'sandbox-2', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
13,964
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import rampy as rp
x = np.arange(0,100,1.0) # a dummy x axis
ref1 = 50.0*np.exp(-1/2*((x-40)/20)**2) + np.random.randn(len(x)) # a gaussian with added noise
ref2 = 70.0*np.exp(-1/2*((x-60)/15)**2) + np.random.randn(len(x)) # a gaussian with added noise
plt.figure()
plt.plot(x,ref1,label="ref1")
plt.plot(x,ref2,label="ref2")
plt.xlabel("X")
plt.ylabel("Y")
plt.legend()
F1_true = np.array([0.80,0.60,0.40,0.20])
obs = np.dot(ref1.reshape(-1,1),F1_true.reshape(1,-1)) + np.dot(ref2.reshape(-1,1),(1-F1_true.reshape(1,-1)))
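# each column of obs is the linear mixture F1*ref1 + (1-F1)*ref2 for one value of F1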
plt.figure()
plt.plot(x,obs)
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Observed signals")
F1_meas = rp.mixing_sp(obs,ref1,ref2)
plt.figure()
plt.plot(F1_true,F1_meas,'ro',label="Measurements")
plt.plot([0,1],[0,1],'k-',label="1:1 line")
plt.xlabel("True $F1$ value")
plt.ylabel("Determined $F1$ value")
plt.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem setting
Step2: We now create 4 intermediate $obs$ signals, with $F1$ = 20%,40%,60% and 80% of ref1.
Step3: Now we can use rp.mixing_sp() to retrieve $F1$.
|
13,965
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
data = [446.6565, 454.4733, 455.663 , 423.6322, 456.2713, 440.5881, 425.3325, 485.1494, 506.0482, 526.792 , 514.2689, 494.211 ]
index= pd.DatetimeIndex(start='1996', end='2008', freq='A')
oildata = pd.Series(data, index)
oildata.index = pd.DatetimeIndex(oildata.index, freq=pd.infer_freq(oildata.index))
data = [17.5534, 21.86 , 23.8866, 26.9293, 26.8885, 28.8314, 30.0751, 30.9535, 30.1857, 31.5797, 32.5776, 33.4774, 39.0216, 41.3864, 41.5966]
index= pd.DatetimeIndex(start='1990', end='2005', freq='A')
air = pd.Series(data, index)
air.index = pd.DatetimeIndex(air.index, freq=pd.infer_freq(air.index))
data = [263.9177, 268.3072, 260.6626, 266.6394, 277.5158, 283.834 , 290.309 , 292.4742, 300.8307, 309.2867, 318.3311, 329.3724, 338.884 , 339.2441, 328.6006, 314.2554, 314.4597, 321.4138, 329.7893, 346.3852, 352.2979, 348.3705, 417.5629, 417.1236, 417.7495, 412.2339, 411.9468, 394.6971, 401.4993, 408.2705, 414.2428]
index= pd.DatetimeIndex(start='1970', end='2001', freq='A')
livestock2 = pd.Series(data, index)
livestock2.index = pd.DatetimeIndex(livestock2.index, freq=pd.infer_freq(livestock2.index))
data = [407.9979 , 403.4608, 413.8249, 428.105 , 445.3387, 452.9942, 455.7402]
index= pd.DatetimeIndex(start='2001', end='2008', freq='A')
livestock3 = pd.Series(data, index)
livestock3.index = pd.DatetimeIndex(livestock3.index, freq=pd.infer_freq(livestock3.index))
data = [41.7275, 24.0418, 32.3281, 37.3287, 46.2132, 29.3463, 36.4829, 42.9777, 48.9015, 31.1802, 37.7179, 40.4202, 51.2069, 31.8872, 40.9783, 43.7725, 55.5586, 33.8509, 42.0764, 45.6423, 59.7668, 35.1919, 44.3197, 47.9137]
index= pd.DatetimeIndex(start='2005', end='2010-Q4', freq='QS')
aust = pd.Series(data, index)
aust.index = pd.DatetimeIndex(aust.index, freq=pd.infer_freq(aust.index))
ax=oildata.plot()
ax.set_xlabel("Year")
ax.set_ylabel("Oil (millions of tonnes)")
plt.show()
print("Figure 7.1: Oil production in Saudi Arabia from 1996 to 2007.")
fit1 = SimpleExpSmoothing(oildata).fit(smoothing_level=0.2,optimized=False)
fcast1 = fit1.forecast(3).rename(r'$\alpha=0.2$')
fit2 = SimpleExpSmoothing(oildata).fit(smoothing_level=0.6,optimized=False)
fcast2 = fit2.forecast(3).rename(r'$\alpha=0.6$')
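# with no smoothing_level supplied, fit() estimates alpha by minimizing the sum of squared errors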
fit3 = SimpleExpSmoothing(oildata).fit()
fcast3 = fit3.forecast(3).rename(r'$\alpha=%s$'%fit3.model.params['smoothing_level'])
ax = oildata.plot(marker='o', color='black', figsize=(12,8))
fcast1.plot(marker='o', ax=ax, color='blue', legend=True)
fit1.fittedvalues.plot(marker='o', ax=ax, color='blue')
fcast2.plot(marker='o', ax=ax, color='red', legend=True)
fit2.fittedvalues.plot(marker='o', ax=ax, color='red')
fcast3.plot(marker='o', ax=ax, color='green', legend=True)
fit3.fittedvalues.plot(marker='o', ax=ax, color='green')
plt.show()
fit1 = Holt(air).fit(smoothing_level=0.8, smoothing_slope=0.2, optimized=False)
fcast1 = fit1.forecast(5).rename("Holt's linear trend")
fit2 = Holt(air, exponential=True).fit(smoothing_level=0.8, smoothing_slope=0.2, optimized=False)
fcast2 = fit2.forecast(5).rename("Exponential trend")
fit3 = Holt(air, damped=True).fit(smoothing_level=0.8, smoothing_slope=0.2)
fcast3 = fit3.forecast(5).rename("Additive damped trend")
ax = air.plot(color="black", marker="o", figsize=(12,8))
fit1.fittedvalues.plot(ax=ax, color='blue')
fcast1.plot(ax=ax, color='blue', marker="o", legend=True)
fit2.fittedvalues.plot(ax=ax, color='red')
fcast2.plot(ax=ax, color='red', marker="o", legend=True)
fit3.fittedvalues.plot(ax=ax, color='green')
fcast3.plot(ax=ax, color='green', marker="o", legend=True)
plt.show()
fit1 = SimpleExpSmoothing(livestock2).fit()
fit2 = Holt(livestock2).fit()
fit3 = Holt(livestock2,exponential=True).fit()
fit4 = Holt(livestock2,damped=True).fit(damping_slope=0.98)
fit5 = Holt(livestock2,exponential=True,damped=True).fit()
params = ['smoothing_level', 'smoothing_slope', 'damping_slope', 'initial_level', 'initial_slope']
results=pd.DataFrame(index=[r"$\alpha$",r"$\beta$",r"$\phi$",r"$l_0$","$b_0$","SSE"] ,columns=['SES', "Holt's","Exponential", "Additive", "Multiplicative"])
results["SES"] = [fit1.params[p] for p in params] + [fit1.sse]
results["Holt's"] = [fit2.params[p] for p in params] + [fit2.sse]
results["Exponential"] = [fit3.params[p] for p in params] + [fit3.sse]
results["Additive"] = [fit4.params[p] for p in params] + [fit4.sse]
results["Multiplicative"] = [fit5.params[p] for p in params] + [fit5.sse]
results
for fit in [fit2,fit4]:
pd.DataFrame(np.c_[fit.level,fit.slope]).rename(
columns={0:'level',1:'slope'}).plot(subplots=True)
plt.show()
print('Figure 7.4: Level and slope components for Holt’s linear trend method and the additive damped trend method.')
fit1 = SimpleExpSmoothing(livestock2).fit()
fcast1 = fit1.forecast(9).rename("SES")
fit2 = Holt(livestock2).fit()
fcast2 = fit2.forecast(9).rename("Holt's")
fit3 = Holt(livestock2, exponential=True).fit()
fcast3 = fit3.forecast(9).rename("Exponential")
fit4 = Holt(livestock2, damped=True).fit(damping_slope=0.98)
fcast4 = fit4.forecast(9).rename("Additive Damped")
fit5 = Holt(livestock2, exponential=True, damped=True).fit()
fcast5 = fit5.forecast(9).rename("Multiplicative Damped")
ax = livestock2.plot(color="black", marker="o", figsize=(12,8))
livestock3.plot(ax=ax, color="black", marker="o", legend=False)
fcast1.plot(ax=ax, color='red', legend=True)
fcast2.plot(ax=ax, color='green', legend=True)
fcast3.plot(ax=ax, color='blue', legend=True)
fcast4.plot(ax=ax, color='cyan', legend=True)
fcast5.plot(ax=ax, color='magenta', legend=True)
ax.set_ylabel('Livestock, sheep in Asia (millions)')
plt.show()
print('Figure 7.5: Forecasting livestock, sheep in Asia: comparing forecasting performance of non-seasonal methods.')
fit1 = ExponentialSmoothing(aust, seasonal_periods=4, trend='add', seasonal='add').fit(use_boxcox=True)
fit2 = ExponentialSmoothing(aust, seasonal_periods=4, trend='add', seasonal='mul').fit(use_boxcox=True)
fit3 = ExponentialSmoothing(aust, seasonal_periods=4, trend='add', seasonal='add', damped=True).fit(use_boxcox=True)
fit4 = ExponentialSmoothing(aust, seasonal_periods=4, trend='add', seasonal='mul', damped=True).fit(use_boxcox=True)
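# use_boxcox=True applies a Box-Cox transform to the series before fitting each model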
results=pd.DataFrame(index=[r"$\alpha$",r"$\beta$",r"$\phi$",r"$\gamma$",r"$l_0$","$b_0$","SSE"])
params = ['smoothing_level', 'smoothing_slope', 'damping_slope', 'smoothing_seasonal', 'initial_level', 'initial_slope']
results["Additive"] = [fit1.params[p] for p in params] + [fit1.sse]
results["Multiplicative"] = [fit2.params[p] for p in params] + [fit2.sse]
results["Additive Dam"] = [fit3.params[p] for p in params] + [fit3.sse]
results["Multiplica Dam"] = [fit4.params[p] for p in params] + [fit4.sse]
ax = aust.plot(figsize=(10,6), marker='o', color='black', title="Forecasts from Holt-Winters' multiplicative method" )
ax.set_ylabel("International visitor night in Australia (millions)")
ax.set_xlabel("Year")
fit1.fittedvalues.plot(ax=ax, style='--', color='red')
fit2.fittedvalues.plot(ax=ax, style='--', color='green')
fit1.forecast(8).rename('Holt-Winters (add-add-seasonal)').plot(ax=ax, style='--', marker='o', color='red', legend=True)
fit2.forecast(8).rename('Holt-Winters (add-mul-seasonal)').plot(ax=ax, style='--', marker='o', color='green', legend=True)
plt.show()
print("Figure 7.6: Forecasting international visitor nights in Australia using Holt-Winters method with both additive and multiplicative seasonality.")
results
df = pd.DataFrame(np.c_[aust, fit1.level, fit1.slope, fit1.season, fit1.fittedvalues],
columns=[r'$y_t$',r'$l_t$',r'$b_t$',r'$s_t$',r'$\hat{y}_t$'],index=aust.index)
df.append(fit1.forecast(8).rename(r'$\hat{y}_t$').to_frame(), sort=True)
df = pd.DataFrame(np.c_[aust, fit2.level, fit2.slope, fit2.season, fit2.fittedvalues],
columns=[r'$y_t$',r'$l_t$',r'$b_t$',r'$s_t$',r'$\hat{y}_t$'],index=aust.index)
df.append(fit2.forecast(8).rename(r'$\hat{y}_t$').to_frame(), sort=True)
states1 = pd.DataFrame(np.c_[fit1.level, fit1.slope, fit1.season], columns=['level','slope','seasonal'], index=aust.index)
states2 = pd.DataFrame(np.c_[fit2.level, fit2.slope, fit2.season], columns=['level','slope','seasonal'], index=aust.index)
fig, [[ax1, ax4],[ax2, ax5], [ax3, ax6]] = plt.subplots(3, 2, figsize=(12,8))
states1[['level']].plot(ax=ax1)
states1[['slope']].plot(ax=ax2)
states1[['seasonal']].plot(ax=ax3)
states2[['level']].plot(ax=ax4)
states2[['slope']].plot(ax=ax5)
states2[['seasonal']].plot(ax=ax6)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple Exponential Smoothing
Step2: Here we run three variants of simple exponential smoothing
Step3: Holt's Method
Step4: Seasonally adjusted data
Step5: Plots of Seasonally Adjusted Data
Step6: Comparison
Step7: Holt's Winters Seasonal
Step8: The Internals
Step9: Finally lets look at the levels, slopes/trends and seasonal components of the models.
|
13,966
|
<ASSISTANT_TASK:>
Python Code:
import pickle
import os
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.model_selection as skms
import sklearn.linear_model as skl
import sklearn.metrics as skm
import tqdm
import copy
import time
from IPython.display import display
%matplotlib inline
sns.set(color_codes=True)
%%javascript
// Fill entire cell with output
IPython.OutputArea.prototype._should_scroll = function(lines) {
return false;
}
filenames = glob.glob(os.path.join("..", "dat", "*"))
label_filename = list(filter(lambda x: "label" in x, filenames))[0]
dat_filename = list(filter(lambda x: "label" not in x, filenames))[0]
# Read in the labels
with open(label_filename, "rb") as f:
labels = pickle.load(f)
# Read in the corresponding configurations
with open(dat_filename, "rb") as f:
data = np.unpackbits(pickle.load(f)).reshape(-1, 1600).astype("int")
# Set spin-down to -1
data[data == 0] = -1
# Set up slices of the dataset
ordered = slice(0, 70000)
critical = slice(70000, 100000)
disordered = slice(100000, 160000)
X_train, X_test, y_train, y_test = skms.train_test_split(
np.concatenate((data[ordered], data[disordered])),
np.concatenate((labels[ordered], labels[disordered])),
test_size=0.95
)
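# test_size=0.95 keeps only 5% of the ordered+disordered samples for training;
# the critical-phase configurations are held out entirely for extrapolation testing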
lambdas = np.logspace(-7, -1, 7)
param_grid = {
"C": list(1.0/lambdas),
"penalty": ["l1", "l2"]
}
clf = skms.GridSearchCV(
skl.LogisticRegression(),
param_grid=param_grid,
n_jobs=-1,
return_train_score=True
)
t0 = time.time()
clf.fit(X_train, y_train)
t1 = time.time()
print (
"Time spent fitting GridSearchCV(LogisticRegression): {0:.3f} sec".format(
t1 - t0
)
)
logreg_df = pd.DataFrame(clf.cv_results_)
display(logreg_df)
train_accuracy = skm.accuracy_score(y_train, clf.predict(X_train))
test_accuracy = skm.accuracy_score(y_test, clf.predict(X_test))
critical_accuracy = skm.accuracy_score(labels[critical], clf.predict(data[critical]))
print ("Accuracy on train data: {0}".format(train_accuracy))
print ("Accuracy on test data: {0}".format(test_accuracy))
print ("Accuracy on critical data: {0}".format(critical_accuracy))
fig = plt.figure(figsize=(20, 14))
for (_X, _y), label in zip(
[
(X_train, y_train),
(X_test, y_test),
(data[critical], labels[critical])
],
["Train", "Test", "Critical"]
):
proba = clf.predict_proba(_X)
fpr, tpr, _ = skm.roc_curve(_y, proba[:, 1])
roc_auc = skm.auc(fpr, tpr)
print ("LogisticRegression AUC ({0}): {1}".format(label, roc_auc))
plt.plot(fpr, tpr, label="{0} (AUC = {1})".format(label, roc_auc), linewidth=4.0)
plt.plot([0, 1], [0, 1], "--", label="Guessing (AUC = 0.5)", linewidth=4.0)
plt.title(r"The ROC curve for LogisticRegression", fontsize=18)
plt.xlabel(r"False positive rate", fontsize=18)
plt.ylabel(r"True positive rate", fontsize=18)
plt.axis([-0.01, 1.01, -0.01, 1.01])
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.legend(loc="best", fontsize=18)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading in the data
Step2: This dataset consists of $10000$ samples, i.e., $10000$ spin configurations with $40 \times 40$ spins each, for $16$ temperatures between $0.25$ and $4.0$. Next we create a train/test split and keep the data in the critical phase as a separate dataset for extrapolation testing.
Step3: Note
Step4: We can see that logistic regression is quite slow and using the grid search and cross validation results in quite a heavy computation. Below we show the results of the different configurations.
Step5: Accuracy of a classification model
Step6: We can see that we get quite good accuracy on the training data, but gradually worsening accuracy on the test and critical data.
|
13,967
|
<ASSISTANT_TASK:>
Python Code:
import twitter
CONSUMER_KEY = ''  # placeholder: insert your Twitter app's consumer key
CONSUMER_SECRET = ''  # placeholder: insert your consumer secret
OAUTH_TOKEN = ''  # placeholder: insert your access token
OAUTH_TOKEN_SECRET = ''  # placeholder: insert your access token secret
# let's do the Oauth dance!
auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
twitter_api = twitter.Twitter(auth=auth)
# success if object created
print(twitter_api)
WORLD_WOE_ID = 1
US_WOE_ID = 23424977
world_trends = twitter_api.trends.place(_id=WORLD_WOE_ID)
us_trends = twitter_api.trends.place(_id=US_WOE_ID)
print(world_trends)
print(us_trends)
type(us_trends)
len(us_trends)
import json
print(json.dumps(world_trends, indent=True))
len(world_trends)
type(world_trends[0])
world_trends[0].keys() # last three are just a record of your request
world_trends[0]['trends'][5]
print(json.dumps(us_trends, indent=True))
world_trends_set = set([trend['name'] for trend in world_trends[0]['trends']])
us_trends_set = set([trend['name'] for trend in us_trends[0]['trends']])
common_trends = world_trends_set.intersection(us_trends_set)
print(common_trends)
q = '#deeplearning'
count = 100
search_results = twitter_api.search.tweets(q=q, count=count)
type(search_results) # surprise! It's a dict
search_results.keys()
statuses = search_results['statuses']
metadata = search_results['search_metadata']
type(statuses)
len(statuses)
type(statuses[0])
type(metadata)
metadata.keys()
for _ in range(5):
print("Length of statuses", len(statuses))
try:
next_results = metadata['next_results']
except KeyError as e: # no more results when next_results doesn't exist
break
kwargs = dict([ kv.split('=') for kv in next_results[1:].split("&")])
print(kwargs) # to see what's happening
    search_results = twitter_api.search.tweets(**kwargs)
    statuses += search_results['statuses']
    metadata = search_results['search_metadata']  # advance pagination to the next page of results
# show one sample result
t = statuses[0]
print(json.dumps(t, indent=True))
t['entities']['urls']
status_texts = [status['text'] for status in statuses]
screen_names = [user_mention['screen_name'] for status in statuses
for user_mention in status['entities']['user_mentions']]
hashtags = [hashtag['text'] for status in statuses for hashtag in status['entities']['hashtags']]
print(json.dumps(status_texts[:5], indent=True))
print(json.dumps(screen_names[:5], indent=True))
print(json.dumps(hashtags[:5], indent=True))
from collections import Counter
for item in [screen_names, hashtags]:
c = Counter(item)
print(c.most_common()[:10])
# big list comprehension
retweets = [
# Store out a tuple of these three values ...
(status['retweet_count'],
status['retweeted_status']['user']['screen_name'],
status['text'])
for status in statuses
# ... so long as the status meets this condition.
if 'retweeted_status' in status
]
sorted(retweets, reverse=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Twitter uses Where On Earth identifiers for places - see http
Step2: The format above is JSON (JavaScript Object Notation). You can read about the JSON format at http
|
13,968
|
<ASSISTANT_TASK:>
Python Code:
!pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro
# first, we need some imports
import os
from IPython.display import set_matplotlib_formats
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from jax import numpy as jnp
from jax import random
from jax.scipy.special import expit
import numpyro
from numpyro import distributions as dist
from numpyro.distributions import constraints
from numpyro.infer import MCMC, NUTS, Predictive
plt.style.use("seaborn")
if "NUMPYRO_SPHINXBUILD" in os.environ:
set_matplotlib_formats("svg")
assert numpyro.__version__.startswith("0.9.2")
train_df = pd.read_csv(
"https://raw.githubusercontent.com/agconti/kaggle-titanic/master/data/train.csv"
)
train_df.info()
train_df.head()
for col in ["Survived", "Pclass", "Sex", "SibSp", "Parch", "Embarked"]:
print(train_df[col].value_counts(), end="\n\n")
train_df.SibSp.clip(0, 1, inplace=True)
train_df.Parch.clip(0, 2, inplace=True)
train_df.Embarked.fillna("S", inplace=True)
train_df.Name.str.split(", ").str.get(1).str.split(" ").str.get(0).value_counts()
train_df["Title"] = (
train_df.Name.str.split(", ")
.str.get(1)
.str.split(" ")
.str.get(0)
.apply(lambda x: x if x in ["Mr.", "Miss.", "Mrs.", "Master."] else "Misc.")
)
title_cat = pd.CategoricalDtype(
categories=["Mr.", "Miss.", "Mrs.", "Master.", "Misc."], ordered=True
)
embarked_cat = pd.CategoricalDtype(categories=["S", "C", "Q"], ordered=True)
age_mean, age_std = train_df.Age.mean(), train_df.Age.std()
data = dict(
age=train_df.Age.pipe(lambda x: (x - age_mean) / age_std).values,
pclass=train_df.Pclass.values - 1,
title=train_df.Title.astype(title_cat).cat.codes.values,
sex=(train_df.Sex == "male").astype(int).values,
sibsp=train_df.SibSp.values,
parch=train_df.Parch.values,
embarked=train_df.Embarked.astype(embarked_cat).cat.codes.values,
)
survived = train_df.Survived.values
# compute the age mean for each title
age_notnan = data["age"][jnp.isfinite(data["age"])]
title_notnan = data["title"][jnp.isfinite(data["age"])]
age_mean_by_title = jnp.stack([age_notnan[title_notnan == i].mean() for i in range(5)])
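# per-title mean age (on the standardized scale), used as a simple fill-in when bayesian_impute=False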
def model(
age, pclass, title, sex, sibsp, parch, embarked, survived=None, bayesian_impute=True
):
b_pclass = numpyro.sample("b_Pclass", dist.Normal(0, 1).expand([3]))
b_title = numpyro.sample("b_Title", dist.Normal(0, 1).expand([5]))
b_sex = numpyro.sample("b_Sex", dist.Normal(0, 1).expand([2]))
b_sibsp = numpyro.sample("b_SibSp", dist.Normal(0, 1).expand([2]))
b_parch = numpyro.sample("b_Parch", dist.Normal(0, 1).expand([3]))
b_embarked = numpyro.sample("b_Embarked", dist.Normal(0, 1).expand([3]))
# impute age by Title
isnan = np.isnan(age)
age_nanidx = np.nonzero(isnan)[0]
if bayesian_impute:
age_mu = numpyro.sample("age_mu", dist.Normal(0, 1).expand([5]))
age_mu = age_mu[title]
age_sigma = numpyro.sample("age_sigma", dist.Normal(0, 1).expand([5]))
age_sigma = age_sigma[title]
age_impute = numpyro.sample(
"age_impute",
dist.Normal(age_mu[age_nanidx], age_sigma[age_nanidx]).mask(False),
)
age = jnp.asarray(age).at[age_nanidx].set(age_impute)
numpyro.sample("age", dist.Normal(age_mu, age_sigma), obs=age)
else:
# fill missing data by the mean of ages for each title
age_impute = age_mean_by_title[title][age_nanidx]
age = jnp.asarray(age).at[age_nanidx].set(age_impute)
a = numpyro.sample("a", dist.Normal(0, 1))
b_age = numpyro.sample("b_Age", dist.Normal(0, 1))
logits = a + b_age * age
logits = logits + b_title[title] + b_pclass[pclass] + b_sex[sex]
logits = logits + b_sibsp[sibsp] + b_parch[parch] + b_embarked[embarked]
numpyro.sample("survived", dist.Bernoulli(logits=logits), obs=survived)
mcmc = MCMC(NUTS(model), num_warmup=1000, num_samples=1000)
mcmc.run(random.PRNGKey(0), **data, survived=survived)
mcmc.print_summary()
age_by_title = age_mean + age_std * mcmc.get_samples()["age_mu"].mean(axis=0)
dict(zip(title_cat.categories, age_by_title))
train_df.groupby("Title")["Age"].mean()
posterior = mcmc.get_samples()
survived_pred = Predictive(model, posterior)(random.PRNGKey(1), **data)["survived"]
survived_pred = (survived_pred.mean(axis=0) >= 0.5).astype(jnp.uint8)
print("Accuracy:", (survived_pred == survived).sum() / survived.shape[0])
confusion_matrix = pd.crosstab(
pd.Series(survived, name="actual"), pd.Series(survived_pred, name="predict")
)
confusion_matrix / confusion_matrix.sum(axis=1)
mcmc.run(random.PRNGKey(2), **data, survived=survived, bayesian_impute=False)
posterior_1 = mcmc.get_samples()
survived_pred_1 = Predictive(model, posterior_1)(random.PRNGKey(2), **data)["survived"]
survived_pred_1 = (survived_pred_1.mean(axis=0) >= 0.5).astype(jnp.uint8)
print("Accuracy:", (survived_pred_1 == survived).sum() / survived.shape[0])
confusion_matrix = pd.crosstab(
pd.Series(survived, name="actual"), pd.Series(survived_pred_1, name="predict")
)
confusion_matrix / confusion_matrix.sum(axis=1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Dataset
Step2: Looking at the data info, we can see that there are missing values in the Age, Cabin, and Embarked columns. Although Cabin is an important feature (because the position of a cabin on the ship can affect the chances of people in that cabin surviving), we will skip it in this tutorial for simplicity. The dataset has many categorical columns and two numerical columns, Age and Fare. Let's first look at the distribution of the categorical columns
Step3: Prepare data
Step4: Looking closer at the data, we can observe that each name contains a title. We know that age is correlated with the title of the name
Step5: We will make a new column Title, where rare titles are merged into one group Misc..
Step6: Now we are ready to turn the dataframe, which includes categorical values, into numpy arrays. We also standardize the Age column (a good practice for regression models).
Step7: Modelling
Step8: Note that in the model, the prior for age is dist.Normal(age_mu, age_sigma), where the values of age_mu and age_sigma depend on title. Because there are missing values in age, we will encode those missing values in the latent parameter age_impute. Then we can replace NaN entries in age with the vector age_impute.
Step9: To double-check that the assumption "age is correlated with title" is reasonable, let's look at the inferred age by title. Recall that we performed standardization on age, so here we need to scale back to the original domain.
Step10: The inferred result confirms our assumption that Age is correlated with Title
Step11: So far so good, we have plenty of information about the regression coefficients together with imputed values and their uncertainties. Let's inspect those results a bit
Step12: This is a pretty good result using a simple logistic regression model. Let's see how the model performs if we don't use Bayesian imputation here.
|
13,969
|
<ASSISTANT_TASK:>
Python Code:
from stix2 import Identity
Identity(name="John Smith",
identity_class="individual",
x_foo="bar")
identity = Identity(name="John Smith",
identity_class="individual",
custom_properties={
"x_foo": "bar"
})
print(identity.serialize(pretty=True))
identity2 = Identity(name="John Smith",
identity_class="individual",
x_foo="bar",
allow_custom=True)
print(identity2.serialize(pretty=True))
from stix2 import parse
input_string = {
"type": "identity",
"spec_version": "2.1",
"id": "identity--311b2d2d-f010-4473-83ec-1edf84858f4c",
"created": "2015-12-21T19:59:11Z",
"modified": "2015-12-21T19:59:11Z",
"name": "John Smith",
"identity_class": "individual",
"x_foo": "bar"
}
identity3 = parse(input_string, allow_custom=True)
print(identity3.x_foo)
identity4 = identity3.new_version(x_foo=None)
print(identity4.serialize(pretty=True))
from stix2 import CustomObject, properties
@CustomObject('x-animal', [
('species', properties.StringProperty(required=True)),
('animal_class', properties.StringProperty()),
])
class Animal(object):
def __init__(self, animal_class=None, **kwargs):
if animal_class and animal_class not in ['mammal', 'bird', 'fish', 'reptile']:
raise ValueError("'%s' is not a recognized class of animal." % animal_class)
animal = Animal(species="lion",
animal_class="mammal")
print(animal.serialize(pretty=True))
Animal(species="xenomorph",
animal_class="alien")
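# Editor's sketch (not in the original notebook): the invalid call above fails with the
# ValueError raised inside the custom __init__, so it can be guarded explicitly.
try:
    Animal(species="xenomorph", animal_class="alien")
except ValueError as err:
    print("Refused to create object:", err)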
input_string2 = {
"type": "x-animal",
"id": "x-animal--941f1471-6815-456b-89b8-7051ddf13e4b",
"created": "2015-12-21T19:59:11Z",
"modified": "2015-12-21T19:59:11Z",
"spec_version": "2.1",
"species": "shark",
"animal_class": "fish"
}
animal2 = parse(input_string2)
print(animal2.species)
input_string3 = {
"type": "x-foobar",
"id": "x-foobar--d362beb5-a04e-4e6b-a030-b6935122c3f9",
"created": "2015-12-21T19:59:11Z",
"modified": "2015-12-21T19:59:11Z",
"bar": 1,
"baz": "frob"
}
parse(input_string3)
from stix2 import CustomObservable
@CustomObservable('x-new-observable', [
('a_property', properties.StringProperty(required=True)),
('property_2', properties.IntegerProperty()),
])
class NewObservable():
pass
new_observable = NewObservable(a_property="something",
property_2=10)
print(new_observable.serialize(pretty=True))
from stix2 import ObservedData
input_string4 = {
"type": "observed-data",
"id": "observed-data--b67d30ff-02ac-498a-92f9-32f845f448cf",
"spec_version": "2.1",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"created": "2016-04-06T19:58:16.000Z",
"modified": "2016-04-06T19:58:16.000Z",
"first_observed": "2015-12-21T19:00:00Z",
"last_observed": "2015-12-21T19:00:00Z",
"number_observed": 50,
"objects": {
"0": {
"type": "x-new-observable",
"a_property": "foobaz",
"property_2": 5
}
}
}
obs_data = parse(input_string4)
print(obs_data.objects["0"].a_property)
print(obs_data.objects["0"].property_2)
from stix2 import CustomObservable
@CustomObservable('x-new-observable-2', [
('a_property', properties.StringProperty(required=True)),
('property_2', properties.IntegerProperty()),
], [
'a_property'
])
class NewObservable2():
pass
new_observable_a = NewObservable2(a_property="A property", property_2=2000)
print(new_observable_a.serialize(pretty=True))
new_observable_b = NewObservable2(a_property="A property", property_2=3000)
print(new_observable_b.serialize(pretty=True))
new_observable_c = NewObservable2(a_property="A different property", property_2=3000)
print(new_observable_c.serialize(pretty=True))
from stix2 import CustomExtension
@CustomExtension('x-new-ext', [
('property1', properties.StringProperty(required=True)),
('property2', properties.IntegerProperty()),
])
class NewExtension():
pass
new_ext = NewExtension(property1="something",
property2=10)
print(new_ext.serialize(pretty=True))
input_string5 = {
"type": "observed-data",
"id": "observed-data--b67d30ff-02ac-498a-92f9-32f845f448cf",
"spec_version": "2.1",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"created": "2016-04-06T19:58:16.000Z",
"modified": "2016-04-06T19:58:16.000Z",
"first_observed": "2015-12-21T19:00:00Z",
"last_observed": "2015-12-21T19:00:00Z",
"number_observed": 50,
"objects": {
"0": {
"type": "file",
"name": "foo.bar",
"hashes": {
"SHA-256": "35a01331e9ad96f751278b891b6ea09699806faedfa237d40513d92ad1b7100f"
},
"extensions": {
"x-new-ext": {
"property1": "bla",
"property2": 50
}
}
}
}
}
obs_data2 = parse(input_string5)
print(obs_data2.objects["0"].extensions["x-new-ext"].property1)
print(obs_data2.objects["0"].extensions["x-new-ext"].property2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To create a STIX object with one or more custom properties, pass them in as a dictionary parameter called custom_properties
Step2: Alternatively, setting allow_custom to True will allow custom properties without requiring a custom_properties dictionary.
Step4: Likewise, when parsing STIX content with custom properties, pass allow_custom=True to parse()
Step5: To remove a custom property, use new_version() and set that property to None.
Step6: Custom STIX Object Types
Step7: Now we can create an instance of our custom Animal type.
Step8: Trying to create an Animal instance with an animal_class that's not in the list will result in an error
Step10: Parsing custom object types that you have already defined is simple and no different from parsing any other STIX object.
Step12: However, parsing custom object types which you have not defined will result in an error
Step13: Custom Cyber Observable Types
Step15: Likewise, after the custom Cyber Observable type has been defined, it can be parsed.
Step16: ID-Contributing Properties for Custom Cyber Observables
Step17: In this example, a_property is the only id-contributing property. Notice that the ID for new_observable_a and new_observable_b is the same since they have the same value for the id-contributing a_property property.
Step19: Once the custom Cyber Observable extension has been defined, it can be parsed.
|
13,970
|
<ASSISTANT_TASK:>
Python Code:
np.random.seed(0)
X_xor = np.random.randn(200, 2)
y_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0)
y_xor = np.where(y_xor, 1, -1)
plt.scatter(X_xor[y_xor==1, 0], X_xor[y_xor==1, 1], c='b', marker='o', label='1', s=100)
plt.scatter(X_xor[y_xor==-1, 0], X_xor[y_xor==-1, 1], c='r', marker='s', label='-1', s=100)
plt.ylim(-3.0)
plt.legend()
plt.title("XOR problem")
plt.show()
def plot_xor(X, y, model, title, xmin=-3, xmax=3, ymin=-3, ymax=3):
XX, YY = np.meshgrid(np.arange(xmin, xmax, (xmax-xmin)/1000), np.arange(ymin, ymax, (ymax-ymin)/1000))
ZZ = np.reshape(model.predict(np.array([XX.ravel(), YY.ravel()]).T), XX.shape)
plt.contourf(XX, YY, ZZ, cmap=mpl.cm.Paired_r, alpha=0.5)
plt.scatter(X[y== 1, 0], X[y== 1, 1], c='b', marker='o', label='+1', s=100)
plt.scatter(X[y==-1, 0], X[y==-1, 1], c='r', marker='s', label='-1', s=100)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.title(title)
plt.show()
from sklearn.svm import SVC
svc = SVC(kernel='linear').fit(X_xor, y_xor)
plot_xor(X_xor, y_xor, svc, 'Linear SVC')
from sklearn.preprocessing import FunctionTransformer
def basis(X):
return np.vstack([X[:, 0]**2, np.sqrt(2)*X[:, 0]*X[:, 1], X[:, 1]**2]).T
X = np.arange(8).reshape(4, 2)
X
FunctionTransformer(basis).fit_transform(X)
X_xor2 = FunctionTransformer(basis).fit_transform(X_xor)
plt.scatter(X_xor2[y_xor== 1, 0], X_xor2[y_xor== 1, 1], c="b", s=50)
plt.scatter(X_xor2[y_xor==-1, 0], X_xor2[y_xor==-1, 1], c="r", s=50)
plt.show()
from sklearn.pipeline import Pipeline
basismodel = Pipeline([('basis', FunctionTransformer(basis)), ('svc', SVC(kernel='linear'))]).fit(X_xor, y_xor)
plot_xor(X_xor, y_xor, basismodel, 'Basis-Function SVC')
polysvc = SVC(kernel='poly', degree=2, gamma=1, coef0=0).fit(X_xor, y_xor)
plot_xor(X_xor, y_xor, polysvc, 'Polynomial SVC')
rbfsvc = SVC(kernel='rbf').fit(X_xor, y_xor)
plot_xor(X_xor, y_xor, rbfsvc, 'RBF SVC')
sigmoidsvc = SVC(kernel='sigmoid', gamma=2, coef0=2).fit(X_xor, y_xor)
plot_xor(X_xor, y_xor, sigmoidsvc, 'Sigmoid SVC')
plot_xor(X_xor, y_xor, SVC(kernel='rbf', gamma=2).fit(X_xor, y_xor), 'RBF SVC (gamma=2)')
plot_xor(X_xor, y_xor, SVC(kernel='rbf', gamma=10).fit(X_xor, y_xor), 'RBF SVC (gamma=10)')
plot_xor(X_xor, y_xor, SVC(kernel="rbf", gamma=50).fit(X_xor, y_xor), "RBF SVC (gamma=50)")
plot_xor(X_xor, y_xor, SVC(kernel="rbf", gamma=100).fit(X_xor, y_xor), "RBF SVC (gamma=100)")
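# Editor's sketch: the four plots above can equivalently be produced with a single sweep
# over gamma (only the API already used above, nothing new assumed).
for gamma in [2, 10, 50, 100]:
    plot_xor(X_xor, y_xor, SVC(kernel="rbf", gamma=gamma).fit(X_xor, y_xor),
             "RBF SVC (gamma=%d)" % gamma)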
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
iris = load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
def plot_iris(X, y, model, title, xmin=-2.5, xmax=2.5, ymin=-2.5, ymax=2.5):
XX, YY = np.meshgrid(np.arange(xmin, xmax, (xmax-xmin)/1000), np.arange(ymin, ymax, (ymax-ymin)/1000))
ZZ = np.reshape(model.predict(np.array([XX.ravel(), YY.ravel()]).T), XX.shape)
plt.contourf(XX, YY, ZZ, cmap=mpl.cm.Paired_r, alpha=0.5)
plt.scatter(X[y==0, 0], X[y==0, 1], c='r', marker='^', label='0', s=100)
plt.scatter(X[y==1, 0], X[y==1, 1], c='g', marker='o', label='1', s=100)
plt.scatter(X[y==2, 0], X[y==2, 1], c='b', marker='s', label='2', s=100)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.title(title)
plt.show()
model = SVC(kernel='linear').fit(X_test_std, y_test)
plot_iris(X_test_std, y_test, model, 'Linear SVC')
model = SVC(kernel='poly', random_state=0, gamma=10, C=1.0).fit(X_test_std, y_test)
plot_iris(X_test_std, y_test, model, 'Polynomial SVC (gamma=10, C=1)')
model = SVC(kernel='rbf', random_state=0, gamma=1, C=1.0).fit(X_test_std, y_test)
plot_iris(X_test_std, y_test, model, "RBF SVC (gamma=1, C=1)")
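# Editor's sketch (not in the original notebook): instead of eyeballing gamma and C, they can
# be selected by cross-validation with scikit-learn's GridSearchCV; the grid values below are
# illustrative assumptions.
from sklearn.model_selection import GridSearchCV
param_grid = {"gamma": [0.1, 1, 10, 100], "C": [0.1, 1, 10, 100]}
search = GridSearchCV(SVC(kernel="rbf", random_state=0), param_grid, cv=5)
search.fit(X_train_std, y_train)
print(search.best_params_)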
np.random.seed(0)
X_xor = np.random.randn(200, 2)
y_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0)
y_xor = np.where(y_xor, 1, -1)
plt.scatter(X_xor[y_xor==1,0], X_xor[y_xor==1,1], c='b', marker='o', label='1', s=100)
plt.scatter(X_xor[y_xor==-1,0], X_xor[y_xor==-1,1], c='r', marker='s', label='-1', s=100)
plt.ylim(-3.0)
plt.legend()
plt.title("XOR problem")
plt.show()
def plot_xor(model, title="", X=X_xor, y=y_xor, xmin=-3, xmax=3, ymin=-3, ymax=3):
XX, YY = np.meshgrid(np.arange(xmin, xmax, (xmax-xmin)/1000), np.arange(ymin, ymax, (ymax-ymin)/1000))
ZZ = np.reshape(model.predict(np.array([XX.ravel(), YY.ravel()]).T), XX.shape)
plt.contourf(XX, YY, ZZ, cmap=mpl.cm.Paired_r, alpha=0.5)
plt.scatter(X[y== 1, 0], X[y== 1, 1], c='b', marker='o', label='+1', s=100)
plt.scatter(X[y==-1, 0], X[y==-1, 1], c='r', marker='s', label='-1', s=100)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.title(title)
plt.show()
def plot_xor(model, title="", X=X_xor, y=y_xor, xmin=-3, xmax=3, ymin=-3, ymax=3):
XX, YY = np.meshgrid(np.arange(xmin, xmax, (xmax-xmin)/1000), np.arange(ymin, ymax, (ymax-ymin)/1000))
ZZ = np.reshape(model.predict(np.array([XX.ravel(), YY.ravel()]).T), XX.shape)
plt.contourf(XX, YY, ZZ, cmap=mpl.cm.Paired_r, alpha=0.5)
plt.scatter(X[y== 1, 0], X[y== 1, 1], c='b', marker='o', label='+1', s=100)
plt.scatter(X[y==-1, 0], X[y==-1, 1], c='r', marker='s', label='-1', s=100)
plt.scatter(model.support_vectors_[y[model.support_]==1, 0], model.support_vectors_[y[model.support_]==1, 1],
c='b', marker='o', s=150, linewidth=3, facecolor=None)
plt.scatter(model.support_vectors_[y[model.support_]==-1, 0], model.support_vectors_[y[model.support_]==-1, 1],
c='r', marker='s', s=150, linewidth=3, facecolor=None)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.title(title)
plt.show()
# Exercise
from sklearn.svm import SVC
rbfsvc = SVC(kernel="rbf", C=1).fit(X_xor, y_xor)
XX, YY = np.meshgrid(np.arange(-3, 3, 6/1000), np.arange(-3, 3, 6/1000))
ZZ = np.reshape(rbfsvc.predict(np.array([XX.ravel(), YY.ravel()]).T), XX.shape)
plt.contourf(XX, YY, ZZ, cmap=mpl.cm.Paired_r, alpha=0.5)
plt.scatter(X_xor[y_xor== 1, 0], X_xor[y_xor== 1, 1], c='b', marker='o', label='+1', s=100)
plt.scatter(X_xor[y_xor==-1, 0], X_xor[y_xor==-1, 1], c='r', marker='s', label='-1', s=100)
plt.scatter(rbfsvc.support_vectors_[y_xor[rbfsvc.support_]==1, 0], rbfsvc.support_vectors_[y_xor[rbfsvc.support_]==1, 1],
c='b', marker='o', s=150, linewidth=3, facecolor=None)
rbfsvc = SVC(kernel="rbf", gamma=100, C=100).fit(X_xor, y_xor)
# As gamma increases, the number of support vectors increases, and all of them influence the boundary.
# That is why the boundary becomes wiggly; by gamma=100 it is extremely wiggly, and the larger gamma gets, the stronger the overfitting.
# With the RBF kernel the shape is easy to anticipate: the boundary forms blobs around regions of points, and gamma is what controls them.
# The RBF kernel can be viewed as working with basis vectors in an infinite-dimensional space; that is, it decomposes the data into infinitely many basis functions.
plot_xor(rbfsvc)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Non-linear discriminant models using basis functions
Step2: The kernel trick
Step3: The meaning of the kernel
Step4: Kernel parameters
Step5: Example
Step6: In kernel SVMs, the kernel function used most often in practice is in fact the RBF kernel.
|
13,971
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy.stats import uniform
f = lambda x: np.log(x)
x = np.linspace(0.1, 5.1, 100)
y = f(x)
Eps = uniform.rvs(-1., 2., size=(100,))
plt.plot(x, y, label='$f(x)$', lw=3)
plt.scatter(x, y + Eps, label='y')
plt.xlabel('x')
plt.legend(loc='best')
plt.show()
models = ['Subset selection lasso', 'least squares', 'generalized additive model trees',
'bagging, boosting', 'support vector machines']
pos = [(0, 1), (0.2, 0.8), (0.4, 0.6), (0.6, 0.1), (0.7, 0.3)]
xlabels = ['Restrictive', 'Flexible']
ylabels = ['Low', 'High']
plt.figure(figsize=(10, 7))
for m, p in zip(models, pos):
plt.text(p[0]+ 0.02, p[1]-0.05, m, size=16)
plt.xticks([0.07, 0.95], xlabels, size=16)
plt.yticks([0, 1], ylabels, size=16)
plt.ylabel('Interpretability', size=20)
plt.xlabel('Flexibility', size=20)
plt.show()
x = np.linspace(0., 1.2, 5)
plt.scatter(x[0:4], [0.1, 0.6, 0.25, 0.7])
plt.plot(x, [0.1, 0.6, 0.25, 0.7, 1.2])
plt.plot(x, x/1.5)
plt.scatter(1.2, 0., c='red')
plt.show()
plt.figure(figsize=(10, 5))
plt.subplot(121)
plt.title('Supervised')
plt.scatter([.0, .2, .1, .3], [.2, .1, .3, .4], c='red', label='nondiabetic')
plt.scatter([.6, .8, .9, .7], [.55, .74, .5, .8], c='blue', label='diabetic')
plt.ylabel('Weekly sugar intake')
plt.xlabel('BMI')
plt.legend(loc=2)
plt.subplot(122)
plt.title('Unsupervised')
plt.scatter([.6, .8, .9, .7]+[.0, .2, .1, .3], [.55, .74, .5, .8]+[.2, .1, .3, .4], c='black', label='diabetic')
plt.ylabel('Weekly sugar intake')
plt.xlabel('BMI')
plt.tight_layout()
x = np.linspace(0., 1., 50)
y = x + np.random.random(size=50) - 0.5
plt.figure(figsize=(10, 5))
plt.subplot(121)
plt.title('Model A')
plt.scatter(x, y)
plt.plot(x, x)
plt.subplot(122)
plt.title('Model B')
plt.scatter(x, y)
plt.plot(x, [0.42]*50)
plt.tight_layout()
plt.show()
plt.figure(figsize=(7, 5))
x = np.linspace(1, 10, 99)
plt.plot(x, 1./x**0.5 - 0.1, label='$MSE_training$', lw=3)
plt.plot(np.linspace(1, 10, 7), [0.9, 0.6, 0.5, 0.45, 0.55, 0.7, 0.9], label='$MSE_{test}$', lw=3)
plt.ylabel('$MSE$')
plt.xlabel('flexibility')
plt.legend()
plt.show()
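# Editor's sketch (not in the original notes): the schematic curves above can be reproduced on
# the noisy log data by fitting polynomials of increasing degree (flexibility) and comparing
# the MSE on the fitted sample with the MSE on a fresh noisy sample; the degrees are illustrative.
x_poly = np.linspace(0.1, 5.1, 100)
y_train_noisy = np.log(x_poly) + np.random.uniform(-1, 1, size=100)
y_test_noisy = np.log(x_poly) + np.random.uniform(-1, 1, size=100)
for degree in [1, 3, 5, 9]:
    coefs = np.polyfit(x_poly, y_train_noisy, degree)
    pred = np.polyval(coefs, x_poly)
    print(degree,
          np.mean((y_train_noisy - pred) ** 2),  # training MSE
          np.mean((y_test_noisy - pred) ** 2))   # MSE on held-out noise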
x = np.linspace(0., 1., 20)
y = [1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
plt.scatter(x, y)
plt.ylabel('Cougar occupied')
plt.xlabel('# of dogs')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Goal
Step2: How do we estimate $\hat{f}$?
Step3: We can fit these points perfectly with a cubic model, but only by assuming that the cubic form is correct.
Step4: In the unsupervised case, we don't know the patient groups.
Step5: Model A is better because the $Ave(y-\hat{y})^2$ (Mean Squared Error) is smaller.
Step6: $MSE_{test}$ should bottom out around the "true" function. $MSE_{test}$ should never drop below the "true" amount of error/residuals. Goal is to minimize $MSE_{test}$.
|
13,972
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
# TODO: Implement Function
vocab = set(text)
vocab_to_int = {c:i for i, c in enumerate(vocab)}
int_to_vocab = {i:c for i, c in enumerate(vocab)}
return vocab_to_int, int_to_vocab
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_create_lookup_tables(create_lookup_tables)
def token_lookup():
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
# TODO: Implement Function
dict_punctuation = {
'.':'||Period||',
',':'||Comma||',
'"':'||Quotation_Mark||',
';':'||Semicolon||',
'!':'||Exclamation_Mark||',
'?':'||Question_Mark||',
'(':'||Left_Parenthesis||',
')':'||Right_Parenthesis||',
'--':'||Dash||',
'\n':'||Return||'
}
return dict_punctuation
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_tokenize(token_lookup)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
# TODO: Implement Function
inputs = tf.placeholder(tf.int32, [None, None], name = 'input')
targets = tf.placeholder(tf.int32, [None, None], name = 'targets')
learning_rate = tf.placeholder(tf.float32, name = 'learning_rate')
return inputs, targets, learning_rate
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_inputs(get_inputs)
lstm_layers = 1
keep_prob = 1
def get_init_cell(batch_size, rnn_size):
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
# TODO: Implement Function
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)
cell_state = cell.zero_state(batch_size, tf.float32)
cell_state = tf.identity(cell_state, name = 'initial_state')
return cell, cell_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
# TODO: Implement Function
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
embed = tf.nn.embedding_lookup(embedding, input_data)
return embed
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
# TODO: Implement Function
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
final_state = tf.identity(final_state, name = 'final_state')
return outputs, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_rnn(build_rnn)
embed_dim = 256
def build_nn(cell, rnn_size, input_data, vocab_size):
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:return: Tuple (Logits, FinalState)
# TODO: Implement Function
embed = get_embed(input_data, vocab_size, embed_dim)
outputs, final_state = build_rnn(cell, embed)
logits = tf.contrib.layers.fully_connected(outputs, vocab_size, weights_initializer=tf.truncated_normal_initializer(stddev=0.1))
return logits, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
# TODO: Implement Function
batch_count = len(int_text)//(batch_size * seq_length)
counter = (batch_size * seq_length)
final = []
row = []
for i in range(batch_count):
x = int_text[i * counter : (i + 1) * counter]
x = np.reshape(x, (batch_size, seq_length))
y = int_text[(i * counter) + 1 : ((i + 1) * counter) + 1]
y = np.reshape(y, (batch_size, seq_length))
row = np.array([x,y])
final.append(row)
return np.array(final)
# test = get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3)
# print(test)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_batches(get_batches)
# Number of Epochs
num_epochs = 500
# Batch Size
batch_size = 1024
# RNN Size
rnn_size = 512
# Sequence Length
seq_length = 65
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 10
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
save_dir = './save'
DON'T MODIFY ANYTHING IN THIS CELL
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
def get_tensors(loaded_graph):
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
# TODO: Implement Function
return loaded_graph.get_tensor_by_name('input:0'), loaded_graph.get_tensor_by_name('initial_state:0'), loaded_graph.get_tensor_by_name('final_state:0'), loaded_graph.get_tensor_by_name('probs:0')
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_tensors(get_tensors)
def pick_word(probabilities, int_to_vocab):
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
# TODO: Implement Function
return int_to_vocab.get(np.argmax(probabilities))
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_pick_word(pick_word)
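# Editor's note (a sketch, not part of the graded solution): argmax decoding is greedy and can
# loop on frequent words; sampling from the predicted distribution is a common alternative.
def pick_word_sampled(probabilities, int_to_vocab):
    idx = np.random.choice(len(probabilities), p=probabilities)
    return int_to_vocab[idx]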
gen_length = 500
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TV Script Generation
Step3: Explore the Data
Step6: Implement Preprocessing Functions
Step9: Tokenize Punctuation
Step11: Preprocess all the data and save it
Step13: Check Point
Step15: Build the Neural Network
Step18: Input
Step21: Build RNN Cell and Initialize
Step24: Word Embedding
Step27: Build RNN
Step30: Build the Neural Network
Step33: Batches
Step35: Neural Network Training
Step37: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Implement Generate Functions
Step49: Choose Word
Step51: Generate TV Script
|
13,973
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
%matplotlib inline
import numpy
import matplotlib.pyplot as plt
from jyquickhelper import RenderJsDot
def plot_network(mat):
    # Draw a graph using the DOT language
# https://graphviz.org/doc/info/lang.html
rows = ["digraph{ ", ' rankdir="LR";', ' size="4,4";']
for i in range(max(mat.shape)):
rows.append(" %d;" % i)
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
if mat[i, j] > 0:
rows.append(" %d -> %d;" % (i, j))
rows.append("}")
dot = "\n".join(rows)
    # print(dot)  # uncomment this line to see the result
return RenderJsDot(dot)
mat = numpy.array([[0, 1, 1, 1, 0],
[0, 0, 1, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
plot_network(mat)
def order_same_weight(mat):
    # array holding the end time of each task
    # at the start, we assume they all end at the time origin
fin = [-1 for i in range(mat.shape[0])]
for j in range(mat.shape[1]):
if mat[:, j].sum() == 0:
            # if task j does not depend on any other task
            # then it can start at time 0
fin[j] = 0
update = True
while update:
update = False
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
if mat[i, j] == 0 or fin[i] == -1:
continue
                # means that task j depends on task i
if fin[j] < fin[i] + 1:
update = True
fin[j] = fin[i] + 1
# fin[j] = max(fin[j], fin[i] + 1)
return fin
order_same_weight(mat)
mat2 = numpy.array([[0, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0]])
plot_network(mat2)
order_same_weight(mat2)
def order_any_weight(mat, durations):
    # mat is the same dependency matrix as before
    # durations is the duration of each task (durations are integers)
    # array holding the end time of each task
    # at the start, we assume they all end at the time origin
fin = [-1 for i in range(mat.shape[0])]
for j in range(mat.shape[1]):
if mat[:, j].sum() == 0:
            # if task j does not depend on any other task
            # then it can start at time 0
fin[j] = 0
update = True
while update:
update = False
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
if mat[i, j] == 0 or fin[i] == -1:
continue
                # means that task j depends on task i
                new_end = fin[i] + durations[i]  ########### changed line
if fin[j] < new_end:
update = True
fin[j] = new_end
# fin[j] = max(fin[j], fin[i] + 1)
return fin
order_any_weight(mat, durations=[1, 1, 1, 1, 1])
order_any_weight(mat, durations=[1, 2, 1, 1, 1])
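# Editor's sketch (not part of the original exercise): treating the values returned by
# order_any_weight as the earliest time each task can begin, the project makespan is the
# maximum over tasks of that time plus the task duration.
def makespan(mat, durations):
    starts = order_any_weight(mat, durations)
    return max(s + d for s, d in zip(starts, durations))
print(makespan(mat, [1, 2, 1, 1, 1]))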
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem statement
Step2: The graph reads as follows
Step3: We check the result on a more complicated graph.
Step4: Q2
|
13,974
|
<ASSISTANT_TASK:>
Python Code:
import pandas
import numpy
import csv
#from scipy.stats import mode
from sklearn import neighbors
from sklearn.neighbors import DistanceMetric
from pprint import pprint
MY_TITANIC_TRAIN = 'train.csv'
MY_TITANIC_TEST = 'test.csv'
titanic_dataframe = pandas.read_csv(MY_TITANIC_TRAIN, header=0)
print('length: {0} '.format(len(titanic_dataframe)))
titanic_dataframe.head(5)
titanic_dataframe.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
print('dropped')
titanic_dataframe.describe()
titanic_dataframe.info()
# age_mean = numpy.mean(titanic_dataframe['Age'])
titanic_dataframe['Age'].fillna(numpy.mean(titanic_dataframe['Age']),inplace=True)
# titanic_dataframe.fillna(value=age_mean, axis=0)
titanic_dataframe.info()
titanic_dataframe.info()
# titanic_dataframe = titanic_dataframe.dropna()
titanic_dataframe['Embarked'].fillna(titanic_dataframe['Embarked'].mode().item(),inplace=True)
titanic_dataframe['Port'] = titanic_dataframe['Embarked'].map({'C':1, 'S':2, 'Q':3}).astype(int)
titanic_dataframe['Gender'] = titanic_dataframe['Sex'].map({'female': 0, 'male': 1}).astype(int)
titanic_dataframe = titanic_dataframe.drop(['Sex', 'Embarked', 'PassengerId', ], axis=1)
titanic_dataframe.info()
#Convert Columns to List
cols = titanic_dataframe.columns.tolist()
titanic_dataframe = titanic_dataframe[cols]
train_cols = [x for x in cols if x != 'Survived']
target_cols = [cols[0]]
print(train_cols, target_cols)
train_data = titanic_dataframe[train_cols]
target_data = titanic_dataframe[target_cols]
algorithm_data_model = neighbors.KNeighborsClassifier()
algorithm_data_model.fit(train_data.values, [value[0] for value in target_data.values])
df_test = pandas.read_csv('test.csv')
ids = df_test.PassengerId.values
df_test.drop(['Name', 'Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True)
print(len(df_test))
df_test.info()
mean_age = df_test.Age.mean()
df_test.Age.fillna(mean_age, inplace=True)
mean_fare = df_test.Fare.mean()
df_test.Fare.fillna(mean_fare, inplace=True)
df_test['Gender'] = df_test['Sex'].map({'female': 0, 'male': 1}).astype(int)
df_test['Port'] = df_test['Embarked'].map({'C':1, 'S':2, 'Q':3}).astype(int)
df_test = df_test.drop(['Sex', 'Embarked'], axis=1)
test_data = df_test.values
df_test.info()
titanic_dataframe.info()
output = algorithm_data_model.predict(df_test).astype(int)
print(output[:10])
result = numpy.c_[ids.astype(int), output]
print(result)
prediction_file = open('ourpredictions.csv', 'w')
open_file = csv.writer(prediction_file)
open_file.writerow(['PassengerId', 'Survived'])
open_file.writerows(zip(ids, output))
prediction_file.close()
%timeit algorithm_data_model.predict(df_test).astype(int)
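# Editor's sketch (not in the original notebook; assumes scikit-learn's model_selection module
# is available): a quick cross-validated accuracy estimate for the k-NN model.
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(neighbors.KNeighborsClassifier(), train_data.values,
                            [value[0] for value in target_data.values], cv=5)
print(cv_scores.mean())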
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Remove Columns
Step2: Which are the factors?
Step3: Pre-Processing
|
13,975
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib
import warnings
warnings.filterwarnings("ignore", category=matplotlib.cbook.MatplotlibDeprecationWarning)
%matplotlib inline
# The S3 URL did not work for me, despite .edu domain
#url = 'http://thredds-aws.unidata.ucar.edu/thredds/radarServer/nexrad/level2/S3/'
#Trying motherlode URL
url = 'http://thredds.ucar.edu/thredds/radarServer/nexrad/level2/IDD/'
from siphon.radarserver import RadarServer
rs = RadarServer(url)
from datetime import datetime, timedelta
query = rs.query()
query.stations('KLVX').time(datetime.utcnow())
rs.validate_query(query)
catalog = rs.get_catalog(query)
catalog.datasets
ds = list(catalog.datasets.values())[0]
ds.access_urls
from siphon.cdmr import Dataset
data = Dataset(ds.access_urls['CdmRemote'])
import numpy as np
def raw_to_masked_float(var, data):
# Values come back signed. If the _Unsigned attribute is set, we need to convert
# from the range [-127, 128] to [0, 255].
if var._Unsigned:
data = data & 255
# Mask missing points
data = np.ma.array(data, mask=data==0)
# Convert to float using the scale and offset
return data * var.scale_factor + var.add_offset
def polar_to_cartesian(az, rng):
az_rad = np.deg2rad(az)[:, None]
x = rng * np.sin(az_rad)
y = rng * np.cos(az_rad)
return x, y
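# Quick sanity check (editor's addition): azimuth is measured clockwise from north, so an
# azimuth of 0 degrees maps onto +y (north) and 90 degrees onto +x (east).
x_demo, y_demo = polar_to_cartesian(np.array([0., 90.]), np.array([0., 1000.]))
print(x_demo.round(1))  # row az=0: [0, 0]; row az=90: [0, 1000]
print(y_demo.round(1))  # row az=0: [0, 1000]; row az=90: [0, 0]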
sweep = 0
ref_var = data.variables['Reflectivity_HI']
ref_data = ref_var[sweep]
rng = data.variables['distanceR_HI'][:]
az = data.variables['azimuthR_HI'][sweep]
ref = raw_to_masked_float(ref_var, ref_data)
x, y = polar_to_cartesian(az, rng)
from metpy.plots import ctables # For NWS colortable
ref_norm, ref_cmap = ctables.registry.get_with_steps('NWSReflectivity', 5, 5)
import matplotlib.pyplot as plt
import cartopy
def new_map(fig, lon, lat):
# Create projection centered on the radar. This allows us to use x
# and y relative to the radar.
proj = cartopy.crs.LambertConformal(central_longitude=lon, central_latitude=lat)
# New axes with the specified projection
ax = fig.add_subplot(1, 1, 1, projection=proj)
# Add coastlines
ax.coastlines('50m', 'black', linewidth=2, zorder=2)
# Grab state borders
state_borders = cartopy.feature.NaturalEarthFeature(
category='cultural', name='admin_1_states_provinces_lines',
scale='50m', facecolor='none')
ax.add_feature(state_borders, edgecolor='black', linewidth=1, zorder=3)
return ax
query = rs.query()
#dt = datetime(2012, 10, 29, 15) # Our specified time
dt = datetime(2016, 6, 8, 18) # Our specified time
query.lonlat_point(-73.687, 41.175).time_range(dt, dt + timedelta(hours=1))
cat = rs.get_catalog(query)
cat.datasets
ds = list(cat.datasets.values())[0]
data = Dataset(ds.access_urls['CdmRemote'])
# Pull out the data of interest
sweep = 0
rng = data.variables['distanceR_HI'][:]
az = data.variables['azimuthR_HI'][sweep]
ref_var = data.variables['Reflectivity_HI']
# Convert data to float and coordinates to Cartesian
ref = raw_to_masked_float(ref_var, ref_var[sweep])
x, y = polar_to_cartesian(az, rng)
fig = plt.figure(figsize=(10, 10))
ax = new_map(fig, data.StationLongitude, data.StationLatitude)
# Set limits in lat/lon space
ax.set_extent([-77, -70, 38, 42])
# Add ocean and land background
ocean = cartopy.feature.NaturalEarthFeature('physical', 'ocean', scale='50m',
edgecolor='face',
facecolor=cartopy.feature.COLORS['water'])
land = cartopy.feature.NaturalEarthFeature('physical', 'land', scale='50m',
edgecolor='face',
facecolor=cartopy.feature.COLORS['land'])
ax.add_feature(ocean, zorder=-1)
ax.add_feature(land, zorder=-1)
#ax = new_map(fig, data.StationLongitude, data.StationLatitude)
ax.pcolormesh(x, y, ref, cmap=ref_cmap, norm=ref_norm, zorder=0);
meshes = []
for item in sorted(cat.datasets.items()):
# After looping over the list of sorted datasets, pull the actual Dataset object out
# of our list of items and access over CDMRemote
ds = item[1]
data = Dataset(ds.access_urls['CdmRemote'])
# Pull out the data of interest
sweep = 0
rng = data.variables['distanceR_HI'][:]
az = data.variables['azimuthR_HI'][sweep]
ref_var = data.variables['Reflectivity_HI']
# Convert data to float and coordinates to Cartesian
ref = raw_to_masked_float(ref_var, ref_var[sweep])
x, y = polar_to_cartesian(az, rng)
# Plot the data and the timestamp
mesh = ax.pcolormesh(x, y, ref, cmap=ref_cmap, norm=ref_norm, zorder=0)
text = ax.text(0.65, 0.03, data.time_coverage_start, transform=ax.transAxes,
fontdict={'size':16})
# Collect the things we've plotted so we can animate
meshes.append((mesh, text))
# Set up matplotlib to do the conversion to HTML5 video
import matplotlib
matplotlib.rcParams['animation.html'] = 'html5'
# Create an animation
from matplotlib.animation import ArtistAnimation
ArtistAnimation(fig, meshes)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll create an instance of RadarServer to point to the appropriate radar server access URL.
Step2: Next, we'll create a new query object to help request the data. Using the chaining methods, let's ask for the latest data at the radar KLVX (Louisville, KY). We see that when the query is represented as a string, it shows the encoded URL.
Step3: We can use the RadarServer instance to check our query, to make sure we have required parameters and that we have chosen valid station(s) and variable(s)
Step4: Make the request, which returns an instance of TDSCatalog; this handles parsing the returned XML information.
Step5: We can look at the datasets on the catalog to see what data we found by the query. We find one volume in the return, since we asked for the volume nearest to a single time.
Step6: We can pull that dataset out of the dictionary and look at the available access URLs. We see URLs for OPeNDAP, CDMRemote, and HTTPServer (direct download).
Step7: We'll use the CDMRemote reader in Siphon and pass it the appropriate access URL.
Step8: We define some helper functions to make working with the data easier. One takes the raw data and converts it to floating point values with the missing data points appropriately marked. The other helps with converting the polar coordinates (azimuth and range) to Cartesian (x and y).
Step9: The CDMRemote reader provides an interface that is almost identical to the usual python NetCDF interface. We pull out the variables we need for azimuth and range, as well as the data itself.
Step10: Then convert the raw data to floating point values and the polar coordinates to Cartesian.
Step11: MetPy is a Python package for meteorology (Documentation
Step12: Finally, we plot them up using matplotlib and cartopy. We create a helper function for making a map to keep things simpler later.
Step13: Download a collection of historical data
Step14: The specified longitude, latitude are in NY and the TDS helpfully finds the closest station to that point. We can see that for this time range we obtained multiple datasets.
Step15: Grab the first dataset so that we can get the longitude and latitude of the station and make a map for plotting. We'll go ahead and specify some longitude and latitude bounds for the map.
Step16: Use the function to make a new map and plot a colormapped view of the data
Step17: Now we can loop over the collection of returned datasets and plot them. As we plot, we collect the returned plot objects so that we can use them to make an animated plot. We also add a timestamp for each plot.
Step18: Using matplotlib, we can take a collection of Artists that have been plotted and turn them into an animation. With matplotlib 1.5 (1.5-rc2 is available now!), this animation can be converted to HTML5 video viewable in the notebook.
|
13,976
|
<ASSISTANT_TASK:>
Python Code:
from ipyleaflet import Map, basemaps, basemap_to_tiles
center = (52.204793, 360.121558)
m = Map(
layers=(basemap_to_tiles(basemaps.NASAGIBS.ModisTerraTrueColorCR, "2018-11-12"), ),
center=center,
zoom=4
)
m
from ipyleaflet import Marker, Icon
icon = Icon(icon_url='https://leafletjs.com/examples/custom-icons/leaf-red.png', icon_size=[38, 95], icon_anchor=[22,94])
mark = Marker(location=center, icon=icon, rotation_origin='22px 94px')
m.add_layer(mark)
import time
for _ in range(40):
mark.rotation_angle += 15
time.sleep(0.1)
from sidecar import Sidecar
from IPython.display import display
sc = Sidecar(title='Map widget')
with sc:
display(m)
from ipywidgets import Button, IntSlider, link
from ipyleaflet import Heatmap
from random import gauss
import time
center = (37.09, -103.66)
zoom = 5
def create_random_data(length):
"Return a list of some random lat/lon/value triples."
return [[gauss(center[0], 2),
gauss(center[1], 4),
gauss(700, 300)] for i in range(length)]
m.center = center
m.zoom = zoom
heat = Heatmap(locations=create_random_data(1000), radius=20, blur=10)
m.add_layer(heat)
def generate(_):
heat.locations = create_random_data(1000)
button = Button(description='Generate data', button_style='success')
button.on_click(generate)
button
slider = IntSlider(min=10, max=30, value=heat.radius)
link((slider, 'value'), (heat, 'radius'))
slider
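# Editor's sketch (assumes the Heatmap layer exposes `blur` as a linkable trait, just like
# `radius` above; not part of the original notebook).
blur_slider = IntSlider(min=1, max=30, value=heat.blur)
link((blur_slider, 'value'), (heat, 'blur'))
blur_slider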
from ipyleaflet import Velocity
import xarray as xr
center = (0, 0)
zoom = 4
m2 = Map(center=center, zoom=zoom, interpolation='nearest', basemap=basemaps.CartoDB.DarkMatter)
sc2 = Sidecar(title='Map Velocity')
with sc2:
display(m2)
ds = xr.open_dataset('src/wind-global.nc')
display_options = {
'velocityType': 'Global Wind',
'displayPosition': 'bottomleft',
'displayEmptyString': 'No wind data'
}
wind = Velocity(data=ds,
zonal_speed='u_wind',
meridional_speed='v_wind',
latitude_dimension='lat',
longitude_dimension='lon',
velocity_scale=0.01,
max_velocity=20,
display_options=display_options)
m2.add_layer(wind)
from ipyleaflet import Map, basemaps, basemap_to_tiles, SplitMapControl
m = Map(center=(42.6824, 365.581), zoom=5)
right_layer = basemap_to_tiles(basemaps.NASAGIBS.ModisTerraTrueColorCR, "2017-11-11")
left_layer = basemap_to_tiles(basemaps.NASAGIBS.ModisAquaBands721CR, "2017-11-11")
control = SplitMapControl(left_layer=left_layer, right_layer=right_layer)
m.add_control(control)
m
import numpy as np
import bqplot.pyplot as plt
from bqplot import *
from traitlets import observe
from sidecar import Sidecar
from ipywidgets import VBox, Button
from ipyleaflet import Map, Marker, Popup
axes_options = {'x': {'label': 'x'}, 'y': {'label': 'y'}}
x = np.arange(40)
y = np.cumsum(np.random.randn(2, 40), axis=1)
fig = plt.figure(animation_duration=1000)
lines = plt.plot(x=x, y=y, colors=['red', 'green'], axes_options=axes_options)
def generate(_):
lines.y = np.cumsum(np.random.randn(2, 40), axis=1)
button = Button(description='Generate data', button_style='success')
button.on_click(generate)
box_plot = VBox([fig, button])
fig
center = (52.204793, 360.121558)
m = Map(center=center, zoom=9, close_popup_on_click=False)
marker = Marker(location=(52.1, 359.9))
m.add_layer(marker)
marker.popup = box_plot
sc = Sidecar(title='Map and bqplot')
with sc:
display(m)
from ipywidgets import Widget
Widget.close_all()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Layers
Step2: <center><img src="src/jupyterlab-sidecar.svg" width="50%"></center>
Step3: Heatmap layer
Step4: Velocity
Step5: Controls
Step6: Plays well with other widgets libraries
Step7: Clean
|
13,977
|
<ASSISTANT_TASK:>
Python Code:
from four_way_interactions import four_way_from_ranking
from total_n_way_interaction import total_n_way_interaction
interaction = four_way_from_ranking([0, 1, 10, 11, 110, 111, 1000, 1001,
1010, 1011, 1100, 1101, 1110, 1111, 100, 101], 110)
print("[Positive interaction, Negative interaction] = " + str(interaction))
interaction = total_n_way_interaction([1, 10, 11, 100, 101, 110, 111, 1000,
1001, 1010, 1011, 1100, 1101, 1110, 1111, 10000,
10001, 10010, 10011, 10100, 10101, 10110, 10111, 11000,
11001, 11010, 11011, 11100, 11101, 11110, 11111, 0])
print("[Positive interaction, Negative interaction] = " + str(interaction))
ranking_9 = [1111, 1110, 1010, 100, 1000, 1100, 111, 110, 101, 11, 1001, 10, 1101, 1011, 1, 0]
u_0011 = four_way_from_ranking(ranking_9, 11)
print(u_0011)
u_0101 = four_way_from_ranking(ranking_9, 101)
print(u_0101)
u_0110 = four_way_from_ranking(ranking_9, 110)
print(u_0110)
u_1001 = four_way_from_ranking(ranking_9, 1001)
print(u_1001)
u_1010 = four_way_from_ranking(ranking_9, 1010)
print(u_1010)
u_1100 = four_way_from_ranking(ranking_9, 1100)
print(u_1100)
u_1110 = four_way_from_ranking(ranking_9, 1110)
print(u_1110)
u_1101 = four_way_from_ranking(ranking_9, 1101)
print(u_1101)
u_0111 = four_way_from_ranking(ranking_9, 111)
print(u_0111)
u_1011 = four_way_from_ranking(ranking_9, 1011)
print(u_1011)
u_1111 = four_way_from_ranking(ranking_9, 1111)
print(u_1111)
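# Editor's sketch (not in the original analysis): the eleven calls above can be collapsed into
# a single loop over the coordinates u of interest.
us = [11, 101, 110, 1001, 1010, 1100, 111, 1011, 1101, 1110, 1111]
interactions_9 = {u: four_way_from_ranking(ranking_9, u) for u in us}
print(interactions_9)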
ranking_8 = [1111, 1110, 1010, 111, 110, 11, 100, 1000, 1100, 10, 101, 1001, 1011, 1101, 1, 0]
u_0011 = four_way_from_ranking(ranking_8, 11)
print(u_0011)
u_0101 = four_way_from_ranking(ranking_8, 101)
print(u_0101)
u_0110 = four_way_from_ranking(ranking_8, 110)
print(u_0110)
u_1001 = four_way_from_ranking(ranking_8, 1001)
print(u_1001)
u_1010 = four_way_from_ranking(ranking_8, 1010)
print(u_1010)
u_1100 = four_way_from_ranking(ranking_8, 1100)
print(u_1100)
u_1110 = four_way_from_ranking(ranking_8, 1110)
print(u_1110)
u_1101 = four_way_from_ranking(ranking_8, 1101)
print(u_1101)
u_0111 = four_way_from_ranking(ranking_8, 111)
print(u_0111)
u_1011 = four_way_from_ranking(ranking_8, 1011)
print(u_1011)
u_1111 = four_way_from_ranking(ranking_8, 1111)
print(u_1111)
ranking_7 = [1111, 111, 1110, 1010, 11, 1011, 110, 1000, 10, 1100, 101, 1001, 1101, 100, 1, 0]
u_0011 = four_way_from_ranking(ranking_7, 11)
print(u_0011)
u_0101 = four_way_from_ranking(ranking_7, 101)
print(u_0101)
u_0110 = four_way_from_ranking(ranking_7, 110)
print(u_0110)
u_1001 = four_way_from_ranking(ranking_7, 1001)
print(u_1001)
u_1010 = four_way_from_ranking(ranking_7, 1010)
print(u_1010)
u_1100 = four_way_from_ranking(ranking_7, 1100)
print(u_1100)
u_1110 = four_way_from_ranking(ranking_7, 1110)
print(u_1110)
u_1101 = four_way_from_ranking(ranking_7, 1101)
print(u_1101)
u_0111 = four_way_from_ranking(ranking_7, 111)
print(u_0111)
u_1011 = four_way_from_ranking(ranking_7, 1011)
print(u_1011)
u_1111 = four_way_from_ranking(ranking_7, 1111)
print(u_1111)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Four-way interaction coordinates
Step2: The latter 110 in the four_way_from_ranking call corresponds to u.
Step3: The output is a pair of truth values corresponding to the positive and negative interaction, respectively.
|
13,978
|
<ASSISTANT_TASK:>
Python Code:
import re
import pytz
import gdelt
import datetime
import numpy as np
import pandas as pd
import seaborn as sns
import geoplot as gplt
from tzwhere import tzwhere
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
tz1 = tzwhere.tzwhere(forceTZ=True)
gd = gdelt.gdelt()
%time vegas = gd.Search(['Oct 1 2017','Oct 2 2017'],normcols=True,coverage=True)
def striptimen(x):
Strip time from numpy array or list of dates that are integers
date = str(int(x))
n = np.datetime64("{}-{}-{}T{}:{}:{}".format(date[:4],date[4:6],date[6:8],date[8:10],date[10:12],date[12:]))
return n
def timeget(x):
'''convert to datetime object with UTC time tag'''
try:
now_aware = pytz.utc.localize(x[2].to_pydatetime())
except:
pass
# get the timezone string representation using lat/lon pair
try:
timezone_str=tz1.tzNameAt(x[0],x[1],forceTZ=True)
# get the time offset
timezone = pytz.timezone(timezone_str)
# convert UTC to calculated local time
aware = now_aware.astimezone(timezone)
return aware
except Exception as e:
pass
# vectorize our two functions
vect = np.vectorize(striptimen)
vect2=np.vectorize(timeget)
# vectorize our function
vect = np.vectorize(striptimen)
# use custom functions to build time enabled columns of dates and zone
vegastimed = (vegas.assign(
dates=vect(vegas.dateadded.values)).assign(
zone=list(timeget(k) for k in vegas.assign(
dates=vect(vegas.dateadded.values))\
[['actiongeolat','actiongeolong','dates']].values)))
# filter to data in Las Vegas and about violence/fighting/mass murder only
vegastimedfil=(vegastimed[
((vegas.eventrootcode=='19') |
(vegas.eventrootcode=='20') |
(vegas.eventrootcode=='18')) &
(vegas.actiongeofeatureid=='847388')])\
.drop_duplicates('sourceurl')
print(vegastimedfil.shape)
# lazy meta-character regex; more elegant
s = re.compile('(http://|https://)([A-Za-z0-9_\.-]+)')
# build the chronological news stories and show the first few rows
print(vegastimedfil.set_index('zone')[['dates','sourceurl']].head())
# example of converting to Los Angeles time.
vegastimed.set_index(
vegastimed.dates.astype('datetime64[ns]')
).tz_localize(
'UTC'
).tz_convert(
'America/Los_Angeles'
)
# regex to strip a url from a string; should work on any url (let me know if it doesn't)
s = re.compile('(http://|https://)([A-Za-z0-9_\.-]+)')
# apply regex to each url; strip provider; assign as new column
print(vegastimedfil.assign(provider=vegastimedfil.sourceurl.\
apply(lambda x: s.search(x).group() if s.search(x) else np.nan))\
.groupby(['provider']).size().sort_values(ascending=False).reset_index().rename(columns={0:"count"}).head())
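# Editor's illustration with a made-up URL: .group() returns the scheme plus the host, which
# is what we treat as the news "provider".
print(s.search('https://www.example.com/2017/10/02/vegas-story').group())  # https://www.example.com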
# chained operation to return shape
vegastimedfil.assign(provider=vegastimedfil.sourceurl.\
apply(lambda x: s.search(x).group() if \
s.search(x) else np.nan))['provider']\
.value_counts().shape
# make plot canvas
f,ax = plt.subplots(figsize=(15,5))
# set title
plt.title('Distributions of Las Vegas Active Shooter News Production')
# ckernel density plot
sns.kdeplot(vegastimedfil.assign(provider=vegastimedfil.sourceurl.\
apply(lambda x: s.search(x).group() if s.search(x) else np.nan))['provider']\
.value_counts(),bw=0.4,shade=True,label='No. of articles written',ax=ax)
# cumulative distribution plot
sns.kdeplot(vegastimedfil.assign(provider=vegastimedfil.sourceurl.\
apply(lambda x: s.search(x).group() if s.search(x) else np.nan))['provider']\
.value_counts(),bw=0.4,shade=True,label='Cumulative',cumulative=True,ax=ax)
# show it
plt.show()
timeseries = pd.concat(
    [vegastimed.set_index(vegastimed.dates.astype('datetime64[ns]'))
               .tz_localize('UTC')
               .tz_convert('America/Los_Angeles')
               .resample('15T')['sourceurl'].count(),
     vegastimedfil.set_index('zone').resample('15T')['sourceurl'].count()],
    axis=1)
# fill empty event counts with zero
timeseries.fillna(0,inplace=True)
# rename columns
timeseries.columns = ['Total Events','Las Vegas Events Only']
# combine
timeseries = timeseries.assign(Normalized=(timeseries['Las Vegas Events Only']/timeseries['Total Events'])*100)
# make the plot
f,ax = plt.subplots(figsize=(13,7))
ax = timeseries.Normalized.ewm(adjust=True,ignore_na=True,min_periods=10,span=20).mean().plot(color="#C10534",label='Exponentially Weighted Count')
ax.set_title('Reports of Violent Events Per 15 Minutes in Vegas',fontsize=28)
for label in ax.get_xticklabels():
label.set_fontsize(16)
ax.set_xlabel('Hour of the Day', fontsize=20)
ax.set_ylabel('Percentage of Hourly Total',fontsize='15')
ax.legend()
plt.tight_layout()
plt.show()
# complex, chained operations to perform all steps listed above
print((((vegastimedfil.reset_index().assign(provider=vegastimedfil.reset_index().sourceurl.\
apply(lambda x: s.search(x).group() if s.search(x) else np.nan),\
epochzone=vegastimedfil.set_index('dates')\
.reset_index()['dates']\
.apply(lambda x: (x.to_pydatetime().timestamp()))).groupby('provider')\
.filter(lambda x: len(x)>=10).groupby('provider').agg([np.mean,np.max,np.min,np.median])\
.sort_index(level='median',ascending=False)['epochzone']['median'])\
.apply(lambda x:datetime.datetime.fromtimestamp(int(x)))\
.sort_values(ascending=True)).reset_index()\
.set_index('median',drop=False)).tz_localize('UTC')\
.tz_convert('America/Los_Angeles'))
# Author: Linwood Creekmore
# Email: valinvescap@gmail.com
# Description: Python script to pull content from a website (works on news stories).
# Notes
23 Oct 2017: updated to include readability based on PyCon talk: https://github.com/DistrictDataLabs/PyCon2016/blob/master/notebooks/tutorial/Working%20with%20Text%20Corpora.ipynb
###################################
# Standard Library imports
###################################
import re
from io import BytesIO
###################################
# Third party imports
###################################
import requests
import numpy as np
from bs4 import BeautifulSoup
from readability.readability import Document as Paper
# placeholder dictionary to keep track of what's been completed
done ={}
def textgetter(url):
Scrapes web news and returns the content
Parameters
----------
url : str
Address to news report
newstext: str
Returns all text in the "p" tag. This usually is the content of the news story.
global done
TAGS = [
'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'p', 'li'
]
# regex for url check
s = re.compile('(http://|https://)([A-Za-z0-9_\.-]+)')
answer = {}
# check that its an url
if s.search(url):
if url in done.keys():
return done[url]
pass
else:
r = requests.get(url)
if r.status_code != 200:
done[url]="Unable to reach website."
answer['base']=s.search(url).group()
answer['url']=url
answer['text']="Unable to reach website."
answer['title']=''
yield answer
doc = Paper(r.content)
data = doc.summary()
title = doc.title()
soup = BeautifulSoup(data,'lxml')
newstext = " ".join([l.text for l in soup.find_all(TAGS)])
del r,data
if len(newstext)>200:
answer['base']=s.search(url).group()
answer['text']=newstext
answer['url']=url
answer['title']=title
yield answer
else:
newstext = " ".join([l.text for l in soup.find_all('div',class_='field-item even')])
done[url]=newstext
if len(newstext)>200:
answer['url']=url
answer['base']=s.search(url).group()
answer['text']=newstext
answer['title']=""
yield answer
else:
answer['url']=url
answer['base']=s.search(url).group()
answer['text']='No text returned'
answer['title']=""
yield answer
else:
answer['text']='This is not a proper url'
answer['url']=url
answer['base']=''
answer['title']=""
yield answer
# create vectorized function
vect = np.vectorize(textgetter)
#vectorize the operation
cc = vect(vegastimedfil['sourceurl'].values[10:25])
#Vectorized opp
dd = list(next(l) for l in cc)
# the output
pd.DataFrame(dd).head(5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setting up gdeltPyR
Step3: Time format transformations
Step4: Now we apply the functions to create a datetime object column (dates) and a timezone aware column (datezone).
Step5: Filtering to a city and specific CAMEO Code
Step6: Stripping out unique news providers
Step7: Build Chronological List
Step8: Time-enabling the entire dataset is a fairly simple task.
Step9: Counting Who Produced the Most
Step10: How many unique news providers?
Step11: Knowing how many providers are producing content, it would be a good idea to understand the distribution of production, that is, how many articles each provider published. We use a distribution plot and a cumulative distribution plot.
Step12: Time Series
Step13: Finding Who Produced the "Fastest"
Step16: Getting the Content
Step17: Testing the Function
|
13,979
|
<ASSISTANT_TASK:>
Python Code:
import os
import pickle
import sys
import nltk
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.layers import (
Dense,
Embedding,
GRU,
Input,
)
from tensorflow.keras.models import (
load_model,
Model,
)
import utils_preproc
print(tf.__version__)
SEED = 0
MODEL_PATH = 'translate_models/baseline'
DATA_URL = 'http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip'
LOAD_CHECKPOINT = False
tf.random.set_seed(SEED)
path_to_zip = tf.keras.utils.get_file(
'spa-eng.zip', origin=DATA_URL, extract=True)
path_to_file = os.path.join(
os.path.dirname(path_to_zip),
"spa-eng/spa.txt"
)
print("Translation data stored at:", path_to_file)
data = pd.read_csv(
path_to_file, sep='\t', header=None, names=['english', 'spanish'])
data.sample(3)
raw = [
"No estamos comiendo.",
"Está llegando el invierno.",
"El invierno se acerca.",
"Tom no comio nada.",
"Su pierna mala le impidió ganar la carrera.",
"Su respuesta es erronea.",
"¿Qué tal si damos un paseo después del almuerzo?"
]
processed = [utils_preproc.preprocess_sentence(s) for s in raw]
processed
integerized, tokenizer = utils_preproc.tokenize(processed)
integerized
tokenizer.sequences_to_texts(integerized)
def load_and_preprocess(path, num_examples):
with open(path_to_file, 'r') as fp:
lines = fp.read().strip().split('\n')
# TODO 1a
sentence_pairs = [
[utils_preproc.preprocess_sentence(sent) for sent in line.split('\t')]
for line in lines[:num_examples]
]
return zip(*sentence_pairs)
en, sp = load_and_preprocess(path_to_file, num_examples=10)
print(en[-1])
print(sp[-1])
def load_and_integerize(path, num_examples=None):
targ_lang, inp_lang = load_and_preprocess(path, num_examples)
# TODO 1b
input_tensor, inp_lang_tokenizer = utils_preproc.tokenize(inp_lang)
target_tensor, targ_lang_tokenizer = utils_preproc.tokenize(targ_lang)
return input_tensor, target_tensor, inp_lang_tokenizer, targ_lang_tokenizer
TEST_PROP = 0.2
NUM_EXAMPLES = 30000
input_tensor, target_tensor, inp_lang, targ_lang = load_and_integerize(
path_to_file, NUM_EXAMPLES)
max_length_targ = target_tensor.shape[1]
max_length_inp = input_tensor.shape[1]
splits = train_test_split(
input_tensor, target_tensor, test_size=TEST_PROP, random_state=SEED)
input_tensor_train = splits[0]
input_tensor_val = splits[1]
target_tensor_train = splits[2]
target_tensor_val = splits[3]
(len(input_tensor_train), len(target_tensor_train),
len(input_tensor_val), len(target_tensor_val))
print("Input Language; int to word mapping")
print(input_tensor_train[0])
print(utils_preproc.int2word(inp_lang, input_tensor_train[0]), '\n')
print("Target Language; int to word mapping")
print(target_tensor_train[0])
print(utils_preproc.int2word(targ_lang, target_tensor_train[0]))
def create_dataset(encoder_input, decoder_input):
# TODO 1c
# shift ahead by 1
target = tf.roll(decoder_input, -1, 1)
# replace last column with 0s
zeros = tf.zeros([target.shape[0], 1], dtype=tf.int32)
target = tf.concat((target[:, :-1], zeros), axis=-1)
dataset = tf.data.Dataset.from_tensor_slices(
((encoder_input, decoder_input), target))
return dataset
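# Illustrative sanity check (added; not part of the original lab): on a toy decoder
# input, the roll-and-mask trick above shifts everything left by one position and
# pads with 0, which is exactly the teacher-forcing target we want.
_toy_decoder_input = tf.constant([[1, 5, 7, 0]])
_toy_target = tf.concat(
    (tf.roll(_toy_decoder_input, -1, 1)[:, :-1],
     tf.zeros([1, 1], dtype=tf.int32)), axis=-1)
print(_toy_target.numpy())  # expected: [[5 7 0 0]]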
BUFFER_SIZE = len(input_tensor_train)
BATCH_SIZE = 64
train_dataset = create_dataset(
input_tensor_train, target_tensor_train).shuffle(
BUFFER_SIZE).repeat().batch(BATCH_SIZE, drop_remainder=True)
eval_dataset = create_dataset(
input_tensor_val, target_tensor_val).batch(
BATCH_SIZE, drop_remainder=True)
EMBEDDING_DIM = 256
HIDDEN_UNITS = 1024
INPUT_VOCAB_SIZE = len(inp_lang.word_index) + 1
TARGET_VOCAB_SIZE = len(targ_lang.word_index) + 1
encoder_inputs = Input(shape=(None,), name="encoder_input")
# TODO 2a
encoder_inputs_embedded = Embedding(
input_dim=INPUT_VOCAB_SIZE,
output_dim=EMBEDDING_DIM,
input_length=max_length_inp)(encoder_inputs)
encoder_rnn = GRU(
units=HIDDEN_UNITS,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
encoder_outputs, encoder_state = encoder_rnn(encoder_inputs_embedded)
decoder_inputs = Input(shape=(None,), name="decoder_input")
# TODO 2b
decoder_inputs_embedded = Embedding(
input_dim=TARGET_VOCAB_SIZE,
output_dim=EMBEDDING_DIM,
input_length=max_length_targ)(decoder_inputs)
decoder_rnn = GRU(
units=HIDDEN_UNITS,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
decoder_outputs, decoder_state = decoder_rnn(
decoder_inputs_embedded, initial_state=encoder_state)
decoder_dense = Dense(TARGET_VOCAB_SIZE, activation='softmax')
predictions = decoder_dense(decoder_outputs)
# TODO 2c
model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=predictions)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.summary()
STEPS_PER_EPOCH = len(input_tensor_train)//BATCH_SIZE
EPOCHS = 1
history = model.fit(
train_dataset,
steps_per_epoch=STEPS_PER_EPOCH,
validation_data=eval_dataset,
epochs=EPOCHS
)
if LOAD_CHECKPOINT:
encoder_model = load_model(os.path.join(MODEL_PATH, 'encoder_model.h5'))
decoder_model = load_model(os.path.join(MODEL_PATH, 'decoder_model.h5'))
else:
# TODO 3a
encoder_model = Model(inputs=encoder_inputs, outputs=encoder_state)
decoder_state_input = Input(shape=(HIDDEN_UNITS,), name="decoder_state_input")
# Reuses weights from the decoder_rnn layer
decoder_outputs, decoder_state = decoder_rnn(
decoder_inputs_embedded, initial_state=decoder_state_input)
# Reuses weights from the decoder_dense layer
predictions = decoder_dense(decoder_outputs)
decoder_model = Model(
inputs=[decoder_inputs, decoder_state_input],
outputs=[predictions, decoder_state]
)
def decode_sequences(input_seqs, output_tokenizer, max_decode_length=50):
"""
Arguments:
input_seqs: int tensor of shape (BATCH_SIZE, SEQ_LEN)
output_tokenizer: Tokenizer used to convert from int to words
Returns translated sentences
"""
# Encode the input as state vectors.
states_value = encoder_model.predict(input_seqs)
# Populate the first character of target sequence with the start character.
batch_size = input_seqs.shape[0]
target_seq = tf.ones([batch_size, 1])
decoded_sentences = [[] for _ in range(batch_size)]
# TODO 4: Sampling loop
for i in range(max_decode_length):
output_tokens, decoder_state = decoder_model.predict(
[target_seq, states_value])
# Sample a token
sampled_token_index = np.argmax(output_tokens[:, -1, :], axis=-1)
tokens = utils_preproc.int2word(output_tokenizer, sampled_token_index)
for j in range(batch_size):
decoded_sentences[j].append(tokens[j])
# Update the target sequence (of length 1).
target_seq = tf.expand_dims(tf.constant(sampled_token_index), axis=-1)
# Update states
states_value = decoder_state
return decoded_sentences
sentences = [
"No estamos comiendo.",
"Está llegando el invierno.",
"El invierno se acerca.",
"Tom no comio nada.",
"Su pierna mala le impidió ganar la carrera.",
"Su respuesta es erronea.",
"¿Qué tal si damos un paseo después del almuerzo?"
]
reference_translations = [
"We're not eating.",
"Winter is coming.",
"Winter is coming.",
"Tom ate nothing.",
"His bad leg prevented him from winning the race.",
"Your answer is wrong.",
"How about going for a walk after lunch?"
]
machine_translations = decode_sequences(
utils_preproc.preprocess(sentences, inp_lang),
targ_lang,
max_length_targ
)
for i in range(len(sentences)):
print('-')
print('INPUT:')
print(sentences[i])
print('REFERENCE TRANSLATION:')
print(reference_translations[i])
print('MACHINE TRANSLATION:')
print(machine_translations[i])
if not LOAD_CHECKPOINT:
os.makedirs(MODEL_PATH, exist_ok=True)
# TODO 3b
model.save(os.path.join(MODEL_PATH, 'model.h5'))
encoder_model.save(os.path.join(MODEL_PATH, 'encoder_model.h5'))
decoder_model.save(os.path.join(MODEL_PATH, 'decoder_model.h5'))
with open(os.path.join(MODEL_PATH, 'encoder_tokenizer.pkl'), 'wb') as fp:
pickle.dump(inp_lang, fp)
with open(os.path.join(MODEL_PATH, 'decoder_tokenizer.pkl'), 'wb') as fp:
pickle.dump(targ_lang, fp)
def bleu_1(reference, candidate):
reference = list(filter(lambda x: x != '', reference)) # remove padding
candidate = list(filter(lambda x: x != '', candidate)) # remove padding
smoothing_function = nltk.translate.bleu_score.SmoothingFunction().method1
return nltk.translate.bleu_score.sentence_bleu(
reference, candidate, (1,), smoothing_function)
def bleu_4(reference, candidate):
reference = list(filter(lambda x: x != '', reference)) # remove padding
candidate = list(filter(lambda x: x != '', candidate)) # remove padding
smoothing_function = nltk.translate.bleu_score.SmoothingFunction().method1
return nltk.translate.bleu_score.sentence_bleu(
reference, candidate, (.25, .25, .25, .25), smoothing_function)
%%time
num_examples = len(input_tensor_val)
bleu_1_total = 0
bleu_4_total = 0
for idx in range(num_examples):
# TODO 5
reference_sentence = utils_preproc.int2word(
targ_lang, target_tensor_val[idx][1:])
decoded_sentence = decode_sequences(
input_tensor_val[idx:idx+1], targ_lang, max_length_targ)[0]
bleu_1_total += bleu_1(reference_sentence, decoded_sentence)
bleu_4_total += bleu_4(reference_sentence, decoded_sentence)
print('BLEU 1: {}'.format(bleu_1_total/num_examples))
print('BLEU 4: {}'.format(bleu_4_total/num_examples))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Downloading the Data
Step2: From the utils_preproc package we have written for you,
Step3: Sentence Integerizing
Step4: The outputted tokenizer can be used to get back the actual words
Step5: Creating the tf.data.Dataset
Step6: load_and_integerize
Step7: Train and eval splits
Step8: Now let's load and integerize the sentence pairs and store the tokenizers for the source and the target language into the inp_lang and targ_lang variables, respectively
Step9: Let us store the maximal sentence length of both languages into two variables
Step10: We are now using scikit-learn train_test_split to create our splits
Step11: Let's make sure the number of example in each split looks good
Step12: The utils_preproc.int2word function allows you to transform the integerized sentences back into words. Note that the <start> token is always encoded as 1, while the <end> token is always encoded as 0
Step13: Create tf.data dataset for train and eval
Step14: Let's now create the actual train and eval dataset using the function above
Step15: Training the RNN encoder-decoder model
Step16: Let's implement the encoder network with Keras functional API. It will
Step17: We now implement the decoder network, which is very similar to the encoder network.
Step18: The last part of the encoder-decoder architecture is a softmax Dense layer that will create the next word probability vector or next word predictions from the decoder_output
Step19: To be able to train the encoder-decoder network defined above, we now need to create a trainable Keras Model by specifying which are the inputs and the outputs of our problem. They should correspond exactly to the types of inputs and outputs in our train and eval tf.data.Dataset, since that is what will be fed to the inputs and outputs we declare when instantiating the Keras Model.
Step20: Let's now train the model!
Step21: Implementing the translation (or decoding) function
Step23: Now that we have a separate encoder and a separate decoder, let's implement a translation function, to which we will give the generic name of decode_sequences (to stress that this procedure is general to all seq2seq problems).
Step24: Now we're ready to predict!
Step25: Checkpoint Model
Step26: Evaluation Metric (BLEU)
Step27: Let's now average the bleu_1 and bleu_4 scores for all the sentence pairs in the eval set. The next cell takes some time to run, the bulk of which is decoding the 6000 sentences in the validation set. Please wait until it completes.
|
13,980
|
<ASSISTANT_TASK:>
Python Code:
# imports
from astropy import units as u
from astropy.coordinates import SkyCoord
import specdb
from specdb.specdb import SpecDB
from specdb import specdb as spdb_spdb
from specdb.cat_utils import flags_to_groups
db_file = specdb.__path__[0]+'/tests/files/IGMspec_DB_v02_debug.hdf5'
from importlib import reload  # `reload` is not a builtin on Python 3
reload(spdb_spdb)
sdb = spdb_spdb.SpecDB(db_file=db_file)
ggg_meta = sdb['GGG'].meta
ggg_meta[0:4]
qdict = {'TELESCOPE': 'Gemini-North', 'NPIX': (1580,1583), 'DISPERSER': ['B600', 'R400']}
qmeta = sdb.query_meta(qdict)
qmeta
qdict = {'R': (4000.,1e9), 'WV_MIN': (0., 4000.)}
qmeta = sdb.query_meta(qdict)
qmeta
qdict = {'R': (1800.,2500), 'WV_MIN': (0., 4000.)}
qmeta = sdb.query_meta(qdict)
qmeta['GROUP'].data
meta = sdb.meta_from_position((0.0019,17.7737), 1*u.arcsec)
meta
meta = sdb.meta_from_position('001115.23+144601.8', 1*u.arcsec)
meta['WV_MIN'].data
meta = sdb.meta_from_position((2.813500,14.767200), 20*u.deg)
meta[0:3]
meta['GROUP'].data
meta = sdb.meta_from_position((2.813500,14.767200), 20*u.deg, groups=['GGG','HD-LLS_DR1'])
meta['GROUP'].data
coord = SkyCoord(ra=0.0019, dec=17.7737, unit='deg')
matches, meta = sdb.meta_from_coords(coord)
meta
coord = SkyCoord(ra=0.0019, dec=-17.7737, unit='deg')
matches, meta = sdb.meta_from_coords(coord)
print(meta)
coord = SkyCoord(ra=2.813458, dec=14.767167, unit='deg')
_, meta = sdb.meta_from_coords(coord)
meta
coords = SkyCoord(ra=[0.0028,2.813458], dec=[14.9747,14.767167], unit='deg')
matches, meta = sdb.meta_from_coords(coords)
print(matches)
meta
coords = SkyCoord(ra=[0.0028,9.99,2.813458], dec=[14.9747,-9.99,14.767167], unit='deg')
matches, meta = sdb.meta_from_coords(coords)
print(matches)
meta
coords = SkyCoord(ra=[0.0028,2.813458], dec=[14.9747,14.767167], unit='deg')
matches, meta = sdb.meta_from_coords(coords, groups=['GGG'])
print(matches)
print(meta['IGM_ID'])
meta
coords = SkyCoord(ra=[0.0028,2.813458], dec=[14.9747,14.767167], unit='deg')
matches, list_of_meta, meta_stack = sdb.meta_from_coords(coords, first=False)
print('Matches = ', matches)
list_of_meta, meta_stack[list_of_meta[0]]
matches, list_of_meta, meta_stack = sdb.meta_from_coords(coords, first=False, groups=['GGG'])
list_of_meta, meta_stack[list_of_meta[1]]
coords = SkyCoord(ra=[0.0028,9.99,2.813458], dec=[14.9747,-9.99,14.767167], unit='deg')
matches, list_of_meta, meta_stack = sdb.meta_from_coords(coords, first=False)
print('Matches = ', matches)
meta_stack[list_of_meta[0]]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup
Step2: Check one of the meta tables
Step3: Query meta with Query dict
Step4: Another example
Step5: One more
Step6: Query meta at position
Step7: Multiple meta entries (GGG)
Step8: Multiple sources
Step9: Restrict on groups
Step10: Query Meta with Coordinates list
Step11: Single source which fails to match
Step12: Source where multiple spectra exist, but only the first record is returned
Step13: Multiple coordinates, each matched
Step14: Multiple coordinates, one fails to match by coordinate
Step15: Multiple coordiantes, one fails to match input group list
Step16: All Meta Data for each input coordinate
Step17: Two sources, limit by groups
Step18: Three sources; second one has no match
|
13,981
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from __future__ import print_function # only necessary if using Python 2.x
import matplotlib.pyplot as plt
import numpy as np
from pyshtools.shclasses import SHCoeffs, SHWindow, SHGrid
nl = 100 # l = [0, 199]
lmax = nl - 1
a = 4 # scale length
ls = np.arange(nl, dtype=np.float)
power = 1. / (1. + (ls / a) ** 2) ** 1.5
coeffs_global = SHCoeffs.from_random(power)
power_global = coeffs_global.spectrum()
coeffs_global.plot_spectrum(unit='per_dlogl')
grid_global = coeffs_global.expand('DH2')
grid_global.plot()
latgrid, longrid = np.meshgrid(grid_global.lats(), grid_global.lons(), indexing='ij')
window = (-40 < latgrid) & (latgrid < -30) & (10 < longrid) & (longrid < 30)
window += (0 < latgrid) & (latgrid < 30) & (60 < longrid) & (longrid < 80)
window += (-70 < latgrid) & (latgrid < -40) & (130 < longrid) & (longrid < 150)
window += (20 < latgrid) & (latgrid < 40) & (125 < longrid) & (longrid < 145)
window += (10 < latgrid) & (latgrid < 30) & (220 < longrid) & (longrid < 250)
grid_window = SHGrid.from_array(window.astype(np.float64)[::-1]) # DH grid starts at north-pole
grid_window.plot()
coeffs_window = grid_window.expand()
coeffs_window.plot_spectrum(unit='per_dlogl')
power_window = coeffs_window.spectrum()
grid_local = SHGrid.from_array(grid_global.data * grid_window.data)
grid_local.plot()
coeffs_local = grid_local.expand()
coeffs_local.plot_spectrum(unit='per_dlogl')
power_local = coeffs_local.spectrum()
from pyshtools.spectralanalysis import SHMTCouplingMatrix
coupling_matrix = SHMTCouplingMatrix(lmax, power_window.reshape(nl, 1))
fig, (col1, col2) = plt.subplots(1, 2, figsize=(10, 3))
col1.imshow(coupling_matrix, cmap='viridis')
col1.set(xlabel='input degree l', ylabel='output degree l', xlim=(-0.5, 10.5), ylim=(10.5, -0.5),
title='zoomed coupling matrix');
col2.imshow(coupling_matrix, cmap='viridis')
col2.set(xlabel='input degree l', ylabel='output degree l', title='full coupling matrix');
from pyshtools.spectralanalysis import SHBias
power_local2 = np.dot(coupling_matrix, power)
power_local3 = SHBias(power_window, power)
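# Illustrative check (added): the coupling-matrix product and SHBias should predict
# essentially the same windowed spectrum; print the largest absolute difference over
# the degrees they have in common.
_n = min(len(power_local2), len(power_local3))
print(np.abs(power_local2[:_n] - power_local3[:_n]).max())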
fig, ax = plt.subplots(1, 1, figsize=(10, 5))
ax.plot(ls, power_global, label='measured global')
ax.plot(ls, power_local, label='measured local')
ax.plot(power_local2, label='predicted local (CouplingMatrix)')
ax.plot(power_local3, 'x', label='predicted local (SHBias)')
ax.legend(loc=3)
ax.set_xscale('log', basex=2)
ax.set_yscale('log', basey=2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we generate random coefficients from this input power spectrum, plot the power spectrum of the random realization, and expand the coefficients on a grid
Step2: Generate a box window function
Step3: Next, generate an SHGrid instance from the input array, plot the grid, expand it in spherical harmonics, and calculate and plot the power spectrum. Note that the first element of the grid corresponds to 0 degrees longitude and 90 degrees north.
Step4: Multiply the random model with the window
Step5: Compute the coupling matrix
Step6: Predict the biased spectrum
|
13,982
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# Random time series.
n = 1000
rs = np.random.RandomState(42)
data = rs.randn(n, 4).cumsum(axis=0)
plt.figure(figsize=(15,5))
plt.plot(data[:, :])
# df = pd.DataFrame(...)
# df.plot(...)
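# A minimal sketch of one way to complete the exercise above (the column names
# 'A'-'D' are assumed; any labels would do):
df = pd.DataFrame(data, columns=list('ABCD'))
df.plot(figsize=(15, 5));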
data = [10, 40, 25, 15, 10]
categories = list('ABCDE')
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
# Right plot.
# axes[1].
# axes[1].
# Left plot.
# axes[0].
# axes[0].
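# A minimal sketch of one possible completion (a bar chart on the left and a pie
# chart on the right are assumed choices; the exercise leaves this open):
axes[0].bar(categories, data)
axes[0].set_title('Bar chart')
axes[1].pie(data, labels=categories)
axes[1].set_title('Pie chart')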
import seaborn as sns
import os
df = sns.load_dataset('iris', data_home=os.path.join('..', 'data'))
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
# Your code for Seaborn: distplot() and boxplot().
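# A possible completion (using 'sepal_length' as an illustrative column):
sns.distplot(df['sepal_length'], ax=axes[0])
sns.boxplot(x='species', y='sepal_length', data=df, ax=axes[1]);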
import ggplot
# Your code for ggplot.
import altair
# altair.Chart(df).mark_bar(opacity=.75).encode(
# x=...,
# y=...,
# color=...
# )
# One line with Seaborn.
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
# df['pca1'] =
# df['pca2'] =
# df['tsne1'] =
# df['tsne2'] =
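# A minimal sketch of one way to fill in the placeholders above (assumes the four
# numeric iris measurements are used as features; this is only one valid choice):
_features = df[['sepal_length', 'sepal_width', 'petal_length', 'petal_width']]
_pca_coords = PCA(n_components=2).fit_transform(_features)
_tsne_coords = TSNE(n_components=2, random_state=0).fit_transform(_features)
df['pca1'], df['pca2'] = _pca_coords[:, 0], _pca_coords[:, 1]
df['tsne1'], df['tsne2'] = _tsne_coords[:, 0], _tsne_coords[:, 1]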
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.swarmplot(x='pca1', y='pca2', data=df, hue='species', ax=axes[0])
sns.swarmplot(x='tsne1', y='tsne2', data=df, hue='species', ax=axes[1]);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2 Categories
Step2: 3 Frequency
Step3: 4 Correlation
Step4: 5 Dimensionality reduction
|
13,983
|
<ASSISTANT_TASK:>
Python Code:
# define your first ever function
def my_pet(your_favourite_animal):
print(your_favourite_animal + " is the best!")
print("Congratulations, you have used your first ever python function!")
# Hint = if you are getting this error:
# TypeError: function_name() missing 1 required positional argument: 'your_favourite_animal'
# It means that you called a function without supplying it with input
my_pet('Mr Toad')
def square_fun(x):
return x**2
result = square_fun(4)
result
### 'def' allows us to define a function and call it 'fibonacci'
## 'N' allows us to pass an argument (value/statement) into the function, in this case, 'N' will be a number of our choice.
def fibonacci(N):
L = [] # 1
a, b = 0, 1 # 2
while len(L) < N: # 3
a, b = b, a + b # 4
L.append(a) # 5
return L # 6
# 1 'L' creates an empty data set, so that we can put data into it and save.
# 2 Here we are just defining variables 'a, b' to the starting numbers '0, 1'
# 3 Now we start a continous loop 'while' and it continues indefinitely until the length(L) is > 'N' before stopping
# 4 Do some maths and save over the values 'a, b' This allows us to save the value of 'a' into the empty dataset 'L'.
# 5 We append the data so that it does not overwrite previous values.
# 6 Return 'L' to python
fibonacci(10)
# Now we can call the function by typing the name, and putting a value of our choice into the function using brackets.
fibonacci(10, 0, 2)
# This doesn't work?
# Why?
# Well, we have not told the function that 'a, b' should be the values '0, 2'
# Time to redesign the function a bit!
def fibonacci(N, a=0, b=1):
"""Calculate Fibonacci sequence"""
L = [] # 1
while len(L) < N: # 3
a, b = b, a + b # 4
L.append(a) # 5
return L # 6
fibonacci(b=3, a=1, N=10)
def calc_theta(t, p, p0=1e5, r_d=287.04, c_p=1004.5):
"""Calculate air potential temperature
Parameters
==========
t : air temperature (K)
p : air pressure (Pa)
Optional inputs
---------------
p0 : reference pressure (Pa), optional. Default is 1e5.
r_d : gas constant of air (J kg^-1 K^-1), optional. Default is 287.04.
c_p : specific heat capacity at a constant pressure (J kg^-1 K^-1), optional. Default is 1004.5.
Returns
=======
theta: air potential temperature (K)
"""
theta = t * (p0 / p) **(r_d / c_p)
return theta
# Your code here
# help(calc_theta)
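# One possible way to try the function (the temperature/pressure values below are
# illustrative, not part of the original exercise):
print(calc_theta(300.0, 85000.0))  # ~314 K for 300 K air at 850 hPa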
def catch_all(*args, **kwargs):
print("args =", args)
print("kwargs = ", kwargs)
catch_all(1, 2, 3, python=4, conda=5)
catch_all('a', keyword=2)
inputs = (1, 2, 3)
keywords = {'pi': 3.14}
catch_all(*inputs, **keywords)
print()
print('compare to this:')
print()
catch_all(inputs, keywords)
add = lambda x, y: x + y
add(1, 2)
def add(x, y):
return x + y
data = [{'first':'Guido', 'last':'Van Rossum', 'YOB':1956},
{'first':'Grace', 'last':'Hopper', 'YOB':1906},
{'first':'Alan', 'last':'Turing', 'YOB':1912}]
sorted([2,4,3,5,1,6])
# sort alphabetically by first name
sorted(data, key=lambda item: item['first'])
# sort by year of birth
sorted(data, key=lambda item: item['YOB'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Put the name of an animal into the brackets. Hint
Step2: Note
Step3: Another example
Step4: Now we have a function named fibonacci which takes a single argument N, does something with this argument, and returns a value; in this case, a list of the first N Fibonacci numbers
Step5: Notice that there is no type information associated with the function inputs or outputs
Step7: We have redefined the function to include 'a' and 'b' as arguments.
Step8: The values can also be specified by name if desired, in which case the order of the named values does not matter
Step10: A more useful example
Step11: Exercise
Step12: *args and **kwargs
Step13: Here it is not the names args and kwargs that are important, but the * characters preceding them.
Step14: Anonymous (lambda) Functions
Step15: This lambda function is roughly equivalent to
Step16: So why would you ever want to use such a thing?
Step17: Now suppose we want to sort this data.
Step18: But dictionaries are not orderable
|
13,984
|
<ASSISTANT_TASK:>
Python Code:
import rebound
rebound.add("Sun")
rebound.add("Jupiter")
rebound.add("Saturn")
for orbit in rebound.calculate_orbits():
print(orbit)
rebound.add("Churyumov-Gerasimenko")
rebound.dt = -0.01
import numpy as np
Noutputs = 1000
year = 2.*np.pi # One year in units where G=1
times = np.linspace(0.,-70.*year, Noutputs)
x = np.zeros((2,Noutputs))
y = np.zeros((2,Noutputs))
rebound.integrator = "ias15" # IAS15 is the default integrator, so we actually don't need this line
rebound.move_to_com() # We always move to the center of momentum frame before an integration
ps = rebound.particles # ps is now an array of pointers and will change as the simulation runs
for i,time in enumerate(times):
rebound.integrate(time)
x[0][i] = ps[1].x # This stores the data which allows us to plot it later
y[0][i] = ps[1].y
x[1][i] = ps[3].x
y[1][i] = ps[3].y
%matplotlib inline
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(5,5))
ax = plt.subplot(111)
ax.set_xlim([-6,6])
ax.set_ylim([-6,6])
plt.plot(x[0], y[0]);
plt.plot(x[1], y[1]);
fig = plt.figure(figsize=(12,5))
ax = plt.subplot(111)
ax.set_xlabel("time [yrs]")
ax.set_ylabel("distance [AU]")
distance = np.sqrt(np.square(x[0]-x[1])+np.square(y[0]-y[1]))
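# Note (added): only the x and y components were stored above, so this is the
# projected distance in the x-y plane rather than the full 3-D separation.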
plt.plot(times/year, distance);
closeencountertime = times[np.argmin(distance)]/year
print("Minimum distance (%f AU) occured at time: %f years." % (np.min(distance),closeencountertime))
import datetime
encounterdate = datetime.datetime.today() + datetime.timedelta(days=365.25*closeencountertime)
encounterdate.strftime("%Y-%m-%d %H:%M")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now all the data is in REBOUND! Let's have a look at the orbits of the two planets.
Step2: Although there are three bodies, the calculate_orbits() function only returns two objects, as the orbit for the Sun would be a little boring. The function returns the orbits in Jacobi coordinates. Since we didn't specify a value for $G$, REBOUND assumes that $G=1$. The unit of length is one astronomical unit, the unit of time is one year/$2\pi$.
Step3: When searching for a body by name, REBOUND takes the first dataset that Horizons offers. In this case, it's a set of parameters from 1962. You probably want to go to the Horizons website and check that the values you are using are up-to-date and appropriate for what you want to do. You can also use more complicated Horizons queries, for example, to get the most recent apparition solution for the comet, use
Step4: While we're integrating, let's store the positions of Jupiter and the comet at 1000 times during the interval. We'll need to prepare a few variables to do that
Step5: Now we're ready to start the integration
Step6: Visualization with matplotlib
Step7: As you can see in the above image, the comet 67P had a rather strong encounter with Jupiter a few years ago. Of course, if you wanted to do a realistic simulation of that encounter, you'd need to include all the other planets and maybe even some non-gravitational effects for the comet. However, let's stick with our simplistic model and try to find out when exactly the two bodies had a close encounter. We already stored the data, so we can just plot their distance as a function of time.
Step8: We can see that the minimum distance occurred approximately 56 years ago (as of writing this tutorial). Let's see what date that was using some python magic and the datetime module
|
13,985
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
Image(filename='images/mgxs.png', width=350)
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import openmc
import openmc.mgxs as mgxs
# Instantiate a Material and register the Nuclides
inf_medium = openmc.Material(name='moderator')
inf_medium.set_density('g/cc', 5.)
inf_medium.add_nuclide('H1', 0.028999667)
inf_medium.add_nuclide('O16', 0.01450188)
inf_medium.add_nuclide('U235', 0.000114142)
inf_medium.add_nuclide('U238', 0.006886019)
inf_medium.add_nuclide('Zr90', 0.002116053)
# Instantiate a Materials collection and export to XML
materials_file = openmc.Materials([inf_medium])
materials_file.export_to_xml()
# Instantiate boundary Planes
min_x = openmc.XPlane(boundary_type='reflective', x0=-0.63)
max_x = openmc.XPlane(boundary_type='reflective', x0=0.63)
min_y = openmc.YPlane(boundary_type='reflective', y0=-0.63)
max_y = openmc.YPlane(boundary_type='reflective', y0=0.63)
# Instantiate a Cell
cell = openmc.Cell(cell_id=1, name='cell')
# Register bounding Surfaces with the Cell
cell.region = +min_x & -max_x & +min_y & -max_y
# Fill the Cell with the Material
cell.fill = inf_medium
# Create root universe
root_universe = openmc.Universe(name='root universe', cells=[cell])
# Create Geometry and set root Universe
openmc_geometry = openmc.Geometry(root_universe)
# Export to "geometry.xml"
openmc_geometry.export_to_xml()
# OpenMC simulation parameters
batches = 50
inactive = 10
particles = 2500
# Instantiate a Settings object
settings_file = openmc.Settings()
settings_file.batches = batches
settings_file.inactive = inactive
settings_file.particles = particles
settings_file.output = {'tallies': True}
# Create an initial uniform spatial source distribution over fissionable zones
bounds = [-0.63, -0.63, -0.63, 0.63, 0.63, 0.63]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings_file.source = openmc.Source(space=uniform_dist)
# Export to "settings.xml"
settings_file.export_to_xml()
# Instantiate a 2-group EnergyGroups object
groups = mgxs.EnergyGroups()
groups.group_edges = np.array([0., 0.625, 20.0e6])
# Instantiate a few different sections
total = mgxs.TotalXS(domain=cell, groups=groups)
absorption = mgxs.AbsorptionXS(domain=cell, groups=groups)
scattering = mgxs.ScatterXS(domain=cell, groups=groups)
# Note that if we wanted to incorporate neutron multiplication in the
# scattering cross section we would write the previous line as:
# scattering = mgxs.ScatterXS(domain=cell, groups=groups, nu=True)
absorption.tallies
# Instantiate an empty Tallies object
tallies_file = openmc.Tallies()
# Add total tallies to the tallies file
tallies_file += total.tallies.values()
# Add absorption tallies to the tallies file
tallies_file += absorption.tallies.values()
# Add scattering tallies to the tallies file
tallies_file += scattering.tallies.values()
# Export to "tallies.xml"
tallies_file.export_to_xml()
# Run OpenMC
openmc.run()
# Load the last statepoint file
sp = openmc.StatePoint('statepoint.50.h5')
# Load the tallies from the statepoint into each MGXS object
total.load_from_statepoint(sp)
absorption.load_from_statepoint(sp)
scattering.load_from_statepoint(sp)
total.print_xs()
df = scattering.get_pandas_dataframe()
df.head(10)
absorption.export_xs_data(filename='absorption-xs', format='excel')
total.build_hdf5_store(filename='mgxs', append=True)
absorption.build_hdf5_store(filename='mgxs', append=True)
scattering.build_hdf5_store(filename='mgxs', append=True)
# Use tally arithmetic to compute the difference between the total, absorption and scattering
difference = total.xs_tally - absorption.xs_tally - scattering.xs_tally
# The difference is a derived tally which can generate Pandas DataFrames for inspection
difference.get_pandas_dataframe()
# Use tally arithmetic to compute the absorption-to-total MGXS ratio
absorption_to_total = absorption.xs_tally / total.xs_tally
# The absorption-to-total ratio is a derived tally which can generate Pandas DataFrames for inspection
absorption_to_total.get_pandas_dataframe()
# Use tally arithmetic to compute the scattering-to-total MGXS ratio
scattering_to_total = scattering.xs_tally / total.xs_tally
# The scattering-to-total ratio is a derived tally which can generate Pandas DataFrames for inspection
scattering_to_total.get_pandas_dataframe()
# Use tally arithmetic to ensure that the absorption- and scattering-to-total MGXS ratios sum to unity
sum_ratio = absorption_to_total + scattering_to_total
# The sum ratio is a derived tally which can generate Pandas DataFrames for inspection
sum_ratio.get_pandas_dataframe()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A variety of tools employing different methodologies have been developed over the years to compute multi-group cross sections for certain applications, including NJOY (LANL), MC$^2$-3 (ANL), and Serpent (VTT). The openmc.mgxs Python module is designed to leverage OpenMC's tally system to calculate multi-group cross sections with arbitrary energy discretizations for fine-mesh heterogeneous deterministic neutron transport applications.
Step2: We begin by creating a material for the homogeneous medium.
Step3: With our material, we can now create a Materials object that can be exported to an actual XML file.
Step4: Now let's move on to the geometry. This problem will be a simple square cell with reflective boundary conditions to simulate an infinite homogeneous medium. The first step is to create the outer bounding surfaces of the problem.
Step5: With the surfaces defined, we can now create a cell that is defined by intersections of half-spaces created by the surfaces.
Step6: OpenMC requires that there is a "root" universe. Let us create a root universe and add our square cell to it.
Step7: We now must create a geometry that is assigned a root universe and export it to XML.
Step8: Next, we must define simulation parameters. In this case, we will use 10 inactive batches and 40 active batches each with 2500 particles.
Step9: Now we are ready to generate multi-group cross sections! First, let's define a 2-group structure using the built-in EnergyGroups class.
Step10: We can now use the EnergyGroups object, along with our previously created materials and geometry, to instantiate some MGXS objects from the openmc.mgxs module. In particular, the following are subclasses of the generic and abstract MGXS class
Step11: Each multi-group cross section object stores its tallies in a Python dictionary called tallies. We can inspect the tallies in the dictionary for our Absorption object as follows.
Step12: The Absorption object includes tracklength tallies for the 'absorption' and 'flux' scores in the 2-group structure in cell 1. Now that each MGXS object contains the tallies that it needs, we must add these tallies to a Tallies object to generate the "tallies.xml" input file for OpenMC.
Step13: Now we have a complete set of inputs, so we can go ahead and run our simulation.
Step14: Tally Data Processing
Step15: In addition to the statepoint file, our simulation also created a summary file which encapsulates information about the materials and geometry. By default, a Summary object is automatically linked when a StatePoint is loaded. This is necessary for the openmc.mgxs module to properly process the tally data.
Step16: Voila! Our multi-group cross sections are now ready to rock 'n roll!
Step17: Since the openmc.mgxs module uses tally arithmetic under-the-hood, the cross section is stored as a "derived" Tally object. This means that it can be queried and manipulated using all of the same methods supported for the Tally class in the OpenMC Python API. For example, we can construct a Pandas DataFrame of the multi-group cross section data.
Step18: Each multi-group cross section object can be easily exported to a variety of file formats, including CSV, Excel, and LaTeX for storage or data processing.
Step19: The following code snippet shows how to export all three MGXS to the same HDF5 binary data store.
Step20: Comparing MGXS with Tally Arithmetic
Step21: Similarly, we can use tally arithmetic to compute the ratio of AbsorptionXS and ScatterXS to the TotalXS.
Step22: Lastly, we sum the derived scatter-to-total and absorption-to-total ratios to confirm that they sum to unity.
|
13,986
|
<ASSISTANT_TASK:>
Python Code:
!pip install -q -U tensorflow==2.1
!pip install -U -q google-api-python-client
!pip install -U -q pandas
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-Id]" #@param {type:"string"}
BUCKET = '[your-bucket-name]' #@param {type:"string"}
REGION = 'us-central1' #@param {type:"string"}
!gcloud config set project $PROJECT_ID
try:
from google.colab import auth
auth.authenticate_user()
print("Colab user is authenticated.")
except: pass
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import pandas as pd
from google.cloud import bigquery
print("TF version: {}".format(tf.__version__))
LOCAL_WORKSPACE = './workspace'
LOCAL_DATA_DIR = os.path.join(LOCAL_WORKSPACE, 'data')
BQ_DATASET_NAME = 'prediction_logs'
BQ_TABLE_NAME = 'covertype_classifier_logs'
MODEL_NAME = 'covertype_classifier'
VERSION_NAME = 'v1'
TRAINING_DIR = os.path.join(LOCAL_WORKSPACE, 'training')
MODEL_DIR = os.path.join(TRAINING_DIR, 'exported_model')
if tf.io.gfile.exists(LOCAL_WORKSPACE):
print("Removing previous workspace artifacts...")
tf.io.gfile.rmtree(LOCAL_WORKSPACE)
print("Creating a new workspace...")
tf.io.gfile.makedirs(LOCAL_WORKSPACE)
tf.io.gfile.makedirs(LOCAL_DATA_DIR)
LOCAL_TRAIN_DATA = os.path.join(LOCAL_DATA_DIR, 'train.csv')
LOCAL_EVAL_DATA = os.path.join(LOCAL_DATA_DIR, 'eval.csv')
!gsutil cp gs://workshop-datasets/covertype/data_validation/training/dataset.csv {LOCAL_TRAIN_DATA}
!gsutil cp gs://workshop-datasets/covertype/data_validation/evaluation/dataset.csv {LOCAL_EVAL_DATA}
!wc -l {LOCAL_TRAIN_DATA}
sample = pd.read_csv(LOCAL_TRAIN_DATA).head()
sample.T
HEADER = ['Elevation', 'Aspect', 'Slope','Horizontal_Distance_To_Hydrology',
'Vertical_Distance_To_Hydrology', 'Horizontal_Distance_To_Roadways',
'Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm',
'Horizontal_Distance_To_Fire_Points', 'Wilderness_Area', 'Soil_Type',
'Cover_Type']
TARGET_FEATURE_NAME = 'Cover_Type'
FEATURE_LABELS = ['0', '1', '2', '3', '4', '5', '6']
NUMERIC_FEATURE_NAMES = ['Aspect', 'Elevation', 'Hillshade_3pm',
'Hillshade_9am', 'Hillshade_Noon',
'Horizontal_Distance_To_Fire_Points',
'Horizontal_Distance_To_Hydrology',
'Horizontal_Distance_To_Roadways','Slope',
'Vertical_Distance_To_Hydrology']
CATEGORICAL_FEATURES_WITH_VOCABULARY = {
'Soil_Type': ['2702', '2703', '2704', '2705', '2706', '2717', '3501', '3502',
'4201', '4703', '4704', '4744', '4758', '5101', '6101', '6102',
'6731', '7101', '7102', '7103', '7201', '7202', '7700', '7701',
'7702', '7709', '7710', '7745', '7746', '7755', '7756', '7757',
'7790', '8703', '8707', '8708', '8771', '8772', '8776'],
'Wilderness_Area': ['Cache', 'Commanche', 'Neota', 'Rawah']
}
FEATURE_NAMES = list(CATEGORICAL_FEATURES_WITH_VOCABULARY.keys()) + NUMERIC_FEATURE_NAMES
HEADER_DEFAULTS = [[0] if feature_name in NUMERIC_FEATURE_NAMES + [TARGET_FEATURE_NAME] else ['NA']
for feature_name in HEADER]
NUM_CLASSES = len(FEATURE_LABELS)
RANDOM_SEED = 19830610
import multiprocessing
def create_dataset(file_pattern,
batch_size=128, num_epochs=1, shuffle=False):
dataset = tf.data.experimental.make_csv_dataset(
file_pattern=file_pattern,
batch_size=batch_size,
column_names=HEADER,
column_defaults=HEADER_DEFAULTS,
label_name=TARGET_FEATURE_NAME,
field_delim=',',
header=True,
num_epochs=num_epochs,
shuffle=shuffle,
shuffle_buffer_size=(5 * batch_size),
shuffle_seed=RANDOM_SEED,
num_parallel_reads=multiprocessing.cpu_count(),
sloppy=True,
)
return dataset.cache()
index = 1
for batch in create_dataset(LOCAL_TRAIN_DATA, batch_size=5, shuffle=False).take(2):
print("Batch: {}".format(index))
print("========================")
record, target = batch
print("Input features:")
for key in record:
print(" - {}:{}".format(key, record[key].numpy()))
print("Target: {}".format(target))
index += 1
print()
import math
def create_feature_columns():
feature_columns = []
for feature_name in FEATURE_NAMES:
# Categorical features
if feature_name in CATEGORICAL_FEATURES_WITH_VOCABULARY:
vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
vocab_size = len(vocabulary)
# Create embedding column for categotical feature column with vocabulary
embedding_feature_column = tf.feature_column.embedding_column(
categorical_column = tf.feature_column.categorical_column_with_vocabulary_list(
key=feature_name,
vocabulary_list=vocabulary), dimension=int(math.sqrt(vocab_size) + 1))
feature_columns.append(embedding_feature_column)
# Numeric features
else:
numeric_column = tf.feature_column.numeric_column(feature_name)
feature_columns.append(numeric_column)
return feature_columns
feature_columns = create_feature_columns()
for column in feature_columns:
print(column)
def create_model(params):
feature_columns = create_feature_columns()
layers = []
layers.append(tf.keras.layers.DenseFeatures(feature_columns))
for units in params.hidden_units:
layers.append(tf.keras.layers.Dense(units=units, activation='relu'))
layers.append(tf.keras.layers.BatchNormalization())
layers.append(tf.keras.layers.Dropout(rate=params.dropout))
layers.append(tf.keras.layers.Dense(units=NUM_CLASSES, activation='softmax'))
model = tf.keras.Sequential(layers=layers, name='classifier')
adam_optimzer = tf.keras.optimizers.Adam(learning_rate=params.learning_rate)
model.compile(
optimizer=adam_optimzer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
)
return model
def run_experiment(model, params):
# TensorBoard callback
LOG_DIR = os.path.join(TRAINING_DIR, 'logs')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=LOG_DIR)
# early stopping callback
earlystopping_callback = tf.keras.callbacks.EarlyStopping(
monitor='val_sparse_categorical_accuracy',
patience=3,
restore_best_weights=True
)
callbacks = [
tensorboard_callback,
earlystopping_callback]
# train dataset
train_dataset = create_dataset(
LOCAL_TRAIN_DATA,
batch_size=params.batch_size,
shuffle=True)
# eval dataset
eval_dataset = create_dataset(
LOCAL_EVAL_DATA,
batch_size=params.batch_size)
# Prep training directory
if tf.io.gfile.exists(TRAINING_DIR):
print("Removing previous training artefacts...")
tf.io.gfile.rmtree(TRAINING_DIR)
print("Creating training directory...")
tf.io.gfile.mkdir(TRAINING_DIR)
print("Experiment started...")
print(".......................................")
# Run train and evaluate.
history = model.fit(
x=train_dataset,
epochs=params.epochs,
callbacks=callbacks,
validation_data=eval_dataset,
)
print(".......................................")
print("Experiment finished.")
print("")
return history
class Parameters():
pass
TRAIN_DATA_SIZE = 431010
params = Parameters()
params.learning_rate = 0.01
params.hidden_units = [128, 128]
params.dropout = 0.15
params.batch_size = 265
params.steps_per_epoch = int(math.ceil(TRAIN_DATA_SIZE / params.batch_size))
params.epochs = 10
model = create_model(params)
example_batch, _ = list(
create_dataset(LOCAL_TRAIN_DATA, batch_size=2, shuffle=True).take(1))[0]
model(example_batch)
model.summary()
import logging
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
history = run_experiment(model, params)
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(w=(10, 5))
# Plot training & validation accuracy values
ax1.plot(history.history['sparse_categorical_accuracy'])
ax1.plot(history.history['val_sparse_categorical_accuracy'])
ax1.set_title('Model accuracy')
ax1.set(xlabel='Accuracy', ylabel='Epoch')
ax1.legend(['Train', 'Eval'], loc='upper left')
# Plot training & validation loss values
ax2.plot(history.history['loss'])
ax2.plot(history.history['val_loss'])
ax2.set_title('Model loss')
ax2.set(xlabel='Accuracy', ylabel='Epoch')
ax2.legend(['Train', 'Eval'], loc='upper left')
MODEL_OUTPUT_KEY = 'probabilities'
SIGNATURE_NAME = 'serving_default'
def make_features_serving_fn(model):
@tf.function
def serve_features_fn(features):
return {MODEL_OUTPUT_KEY: model(features)}
return serve_features_fn
feature_spec = {}
for feature_name in FEATURE_NAMES:
if feature_name in CATEGORICAL_FEATURES_WITH_VOCABULARY:
feature_spec[feature_name] = tf.io.FixedLenFeature(
shape=(1,), dtype=tf.string)
else:
feature_spec[feature_name] = tf.io.FixedLenFeature(
shape=(1,), dtype=tf.float32)
for key, value in feature_spec.items():
print("{}: {}".format(key, value))
features_input_signature = {
feature: tf.TensorSpec(shape=spec.shape, dtype=spec.dtype, name=feature)
for feature, spec in feature_spec.items()
}
signatures = {
SIGNATURE_NAME: make_features_serving_fn(model).get_concrete_function(
features_input_signature),
}
model.save(MODEL_DIR, save_format='tf', signatures=signatures)
print("Model is exported to: {}.".format(MODEL_DIR))
!saved_model_cli show --dir {MODEL_DIR} --tag_set serve --signature_def {SIGNATURE_NAME}
instances = [
{
'Soil_Type': '7202',
'Wilderness_Area': 'Commanche',
'Aspect': 61,
'Elevation': 3091,
'Hillshade_3pm': 129,
'Hillshade_9am': 227,
'Hillshade_Noon': 223,
'Horizontal_Distance_To_Fire_Points': 2868,
'Horizontal_Distance_To_Hydrology': 134,
'Horizontal_Distance_To_Roadways': 0,
'Slope': 8,
'Vertical_Distance_To_Hydrology': 10,
}
]
import numpy as np
def create_tf_features(instance):
new_instance = {}
for key, value in instance.items():
if key in CATEGORICAL_FEATURES_WITH_VOCABULARY:
new_instance[key] = tf.constant(value, dtype=tf.string)
else:
new_instance[key] = tf.constant(value, dtype=tf.float32)
return new_instance
features_predictor = tf.saved_model.load(MODEL_DIR).signatures[SIGNATURE_NAME]
def local_predict(instance):
features = create_tf_features(instance)
probabilities = features_predictor(**features)[MODEL_OUTPUT_KEY].numpy()
predictions = FEATURE_LABELS[int(np.argmax(probabilities))]
return predictions
local_predict(instances[0])
!gsutil rm -r gs://{BUCKET}/models/{MODEL_NAME}
!gsutil cp -r {MODEL_DIR} gs://{BUCKET}/models/{MODEL_NAME}
!gcloud ai-platform models create {MODEL_NAME} \
--project {PROJECT_ID} \
--regions {REGION}
# list the models
!gcloud ai-platform models list --project {PROJECT_ID}
!gcloud ai-platform versions create {VERSION_NAME} \
--model={MODEL_NAME} \
--origin=gs://{BUCKET}/models/{MODEL_NAME} \
--runtime-version=2.1 \
--framework=TENSORFLOW \
--python-version=3.7 \
--project={PROJECT_ID}
# list the model versions
!gcloud ai-platform versions list --model={MODEL_NAME} --project={PROJECT_ID}
import googleapiclient.discovery
service = googleapiclient.discovery.build('ml', 'v1')
name = 'projects/{}/models/{}/versions/{}'.format(PROJECT_ID, MODEL_NAME, VERSION_NAME)
print("Service name: {}".format(name))
def caip_predict(instances):
serving_instances = []
for instance in instances:
serving_instances.append(
{key: [value] for key, value in instance.items()})
request_body={
'signature_name': SIGNATURE_NAME,
'instances': serving_instances}
response = service.projects().predict(
name=name,
body=request_body
).execute()
if 'error' in response:
raise RuntimeError(response['error'])
probability_list = [output[MODEL_OUTPUT_KEY] for output in response['predictions']]
classes = [FEATURE_LABELS[int(np.argmax(probabilities))] for probabilities in probability_list]
return classes
caip_predict(instances)
client = bigquery.Client(PROJECT_ID)
dataset_names = [dataset.dataset_id for dataset in client.list_datasets(PROJECT_ID)]
dataset = bigquery.Dataset("{}.{}".format(PROJECT_ID, BQ_DATASET_NAME))
dataset.location = "US"
if BQ_DATASET_NAME not in dataset_names:
dataset = client.create_dataset(dataset)
print("Created dataset {}.{}".format(client.project, dataset.dataset_id))
print("BigQuery Dataset is ready.")
import json
table_schema_json = [
{
"name": "model",
"type": "STRING",
"mode": "REQUIRED"
},
{
"name":"model_version",
"type": "STRING",
"mode":"REQUIRED"
},
{
"name":"time",
"type": "TIMESTAMP",
"mode": "REQUIRED"
},
{
"name":"raw_data",
"type": "STRING",
"mode": "REQUIRED"
},
{
"name":"raw_prediction",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name":"groundtruth",
"type": "STRING",
"mode": "NULLABLE"
},
]
json.dump(
table_schema_json, open('table_schema.json', 'w'))
table = bigquery.Table(
"{}.{}.{}".format(PROJECT_ID, BQ_DATASET_NAME, BQ_TABLE_NAME))
table_names = [table.table_id for table in client.list_tables(dataset)]
if BQ_TABLE_NAME in table_names:
print("Deleteing BQ Table: {} ...".format(BQ_TABLE_NAME))
client.delete_table(table)
TIME_PARTITION_EXPERIATION = int(60 * 60 * 24 * 7)
!bq mk --table \
--project_id={PROJECT_ID} \
--time_partitioning_field=time \
--time_partitioning_type=DAY \
--time_partitioning_expiration={TIME_PARTITION_EXPERIATION} \
{PROJECT_ID}:{BQ_DATASET_NAME}.{BQ_TABLE_NAME} \
'table_schema.json'
sampling_percentage = 1.0
bq_full_table_name = '{}.{}.{}'.format(PROJECT_ID, BQ_DATASET_NAME, BQ_TABLE_NAME)
logging_config = {
"requestLoggingConfig":{
"samplingPercentage": sampling_percentage,
"bigqueryTableName": bq_full_table_name
}
}
service.projects().models().versions().patch(
name=name,
body=logging_config,
updateMask="requestLoggingConfig"
).execute()
import time
for i in range(10):
caip_predict(instances)
print('.', end='')
time.sleep(0.1)
query = '''
SELECT * FROM
`{}.{}`
WHERE model_version = '{}'
ORDER BY time desc
LIMIT {}
'''.format(BQ_DATASET_NAME, BQ_TABLE_NAME, VERSION_NAME, 3)
pd.io.gbq.read_gbq(
query, project_id=PROJECT_ID).T
view_name = "vw_"+BQ_TABLE_NAME+"_"+VERSION_NAME
colum_names = FEATURE_NAMES
input_features = ', \r\n '.join(colum_names)
json_features_extraction = []
for feature_name in colum_names:
s = "JSON_EXTRACT(instance, '$.{}')".format(feature_name)
if feature_name in NUMERIC_FEATURE_NAMES:
s = "CAST({} AS NUMERIC)".format(s)
s += " AS {}".format(feature_name)
json_features_extraction.append(s)
json_features_extraction = ', \r\n '.join(json_features_extraction)
class_probability_pivoting = []
for class_index, class_label in enumerate(FEATURE_LABELS):
s = "CAST(MAX(IF(class_index = {}, class_probability, NULL)) as FLOAT64) as prob_{}".format(class_index, class_label)
class_probability_pivoting.append(s)
class_probability_pivoting = ', \r\n '.join(class_probability_pivoting)
class_prob = []
for class_label in FEATURE_LABELS:
s = 'prob_{}'.format(class_label)
class_prob.append(s)
class_prob = ', \r\n '.join(class_prob)
case_conditions = []
for class_label in FEATURE_LABELS:
s = 'WHEN prob_max = prob_{} THEN {}'.format(class_label, class_label)
case_conditions.append(s)
case_conditions = ' \r\n '.join(case_conditions)
sql_script = '''
CREATE OR REPLACE VIEW @dataset_name.@view_name
AS
WITH step1
AS
(
SELECT
model,
model_version,
time,
SPLIT(JSON_EXTRACT(raw_data, '$.instances'), '}],[{') instance_list,
SPLIT(JSON_EXTRACT(raw_prediction, '$.predictions'), '}],[{') as prediction_list
FROM
`@project.@dataset_name.@table_name`
WHERE
model = '@model_name' AND
model_version = '@version'
),
step2
AS
(
SELECT
model,
model_version,
time,
REPLACE(REPLACE(instance, '[', ''),']', '') AS instance,
REPLACE(REPLACE(prediction, '[{"@model_output_key":[', ''),']}]', '') AS prediction,
FROM step1
JOIN UNNEST(step1.instance_list) AS instance
WITH OFFSET AS f1
JOIN UNNEST(step1.prediction_list) AS prediction
WITH OFFSET AS f2
ON f1=f2
),
step3 AS
(
SELECT
model,
model_version,
time,
@json_features_extraction,
SPLIT(prediction, ',') AS class_probabilities,
FROM step2
),
step4
AS
(
SELECT * EXCEPT(class_probabilities)
FROM step3
JOIN UNNEST(step3.class_probabilities) AS class_probability
WITH OFFSET AS class_index
),
step5
AS
(
SELECT
model,
model_version,
time,
@input_features,
@class_probability_pivoting,
MAX(CAST(class_probability AS FLOAT64)) as prob_max
FROM step4
GROUP BY
model,
model_version,
time,
@input_features
)
SELECT
model,
model_version,
time,
@input_features,
@class_prob,
CASE
@case_conditions
END as predicted_class
FROM
step5
'''
sql_script = sql_script.replace("@project", PROJECT_ID)
sql_script = sql_script.replace("@dataset_name", BQ_DATASET_NAME)
sql_script = sql_script.replace("@table_name", BQ_TABLE_NAME)
sql_script = sql_script.replace("@view_name", view_name)
sql_script = sql_script.replace("@model_name", MODEL_NAME)
sql_script = sql_script.replace("@version", VERSION_NAME)
sql_script = sql_script.replace("@input_features", input_features)
sql_script = sql_script.replace("@json_features_extraction", json_features_extraction)
sql_script = sql_script.replace("@model_output_key", MODEL_OUTPUT_KEY)
sql_script = sql_script.replace("@class_probability_pivoting", class_probability_pivoting)
sql_script = sql_script.replace("@class_prob", class_prob)
sql_script = sql_script.replace("@case_conditions", case_conditions)
print(sql_script)
client.query(query = sql_script)
print("View was created or replaced.")
query = '''
SELECT * FROM
`{}.{}`
LIMIT {}
'''.format(BQ_DATASET_NAME, view_name, 3)
pd.io.gbq.read_gbq(
query, project_id=PROJECT_ID).T
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up your GCP project and GCS bucket
Step2: Authenticate your GCP account
Step3: Import libraries
Step4: Define constants
Step5: Create a local workspace
Step6: 1. Dataset preparation and schema generation
Step7: View a sample of the downloaded data
Step8: 1.2 Define metadata
Step9: 2. Model training and evaluation
Step10: The following code tests reading some batches of data using the data input function.
Step11: 2.2. Create feature columns
Step12: The following code tests the feature columns to be created.
Step13: 2.3. Create and compile the model
Step14: 2.4. Train and evaluate experiment
Step15: Hyper parameters
Step16: Run experiment
Step17: Visualize training history
Step18: 3. Model export for serving
Step19: 3.1. Implement serving input receiver functions
Step20: Feature spec
Step21: 3.2. Export the model
Step22: Verify the signature (inputs and outputs) of the exported model using saved_model_cli
Step23: 3.3. Test exported model locally
Step24: Prepare the sample instance in the format expected by the model signature
Step25: Load the SavedModel for prediction, and create a function that generates the prediction pobabilities from the model to return the class label with the highest probability
Step26: Predict using the local SavedModel
Step27: 3.4 Upload exported model to GCS
Step28: 4. Model deployment to AI Platform
Step29: 4.2. Create a model version
Step30: 4.3. Test deployed model
Step31: Predict using AI Platform Prediction
Step32: 5. BigQuery logging dataset preparation
Step33: 5.2. Create the BigQuery table to store the logs
Step34: Creating an ingestion-time partitioned table
Step35: 5.3. Configure the AI Platform model version to enable request-response logging to BigQuery
Step36: 5.4. Test request-response logging
Step37: Query the logged request-reponse entries in BigQuery
Step38: 6. BigQuery logs parsing
Step39: Print the generated script
Step40: 6.2. Create a view to parse the logs
Step41: 6.3. Query the view
|
13,987
|
<ASSISTANT_TASK:>
Python Code:
# Libraries used
import pandas as pd
import sys
module_path = r'D:\PCCS\01_Dmine\Scripts'
if module_path not in sys.path:
sys.path.append(module_path)
from SUN.asignar_sun import asignar_sun
from SUN_integridad.SUN_integridad import SUN_integridad
from SUN.CargaSunPrincipal import getsun
# System configuration
import sys; print('Python {} on {}'.format(sys.version, sys.platform))
print('Pandas version: {}'.format(pd.__version__))
import platform; print('Running on {} {}'.format(platform.system(), platform.release()))
# Load the "CiudadesPIGOO_ClaveInegi.xlsx" dataset into the system
pigoo_inegi = r'D:\PCCS\01_Dmine\Datasets\Pigoo\CiudadesPIGOO_ClaveInegi.xlsx'
pigoo_inegi_df = pd.read_excel(pigoo_inegi, sheetname='OOAPAS-PIGOO', index_col=0,
dtype={'Clave-Estado-Inegi': str,
'Clave-Municipio-Inegi': str,
'Clave-Localidad-Inegi': str})
pigoo_inegi_df.head()
# Standardize the municipal geostatistical key to 5 digits
pigoo_inegi_df['CVE_MUN'] = pigoo_inegi_df['Clave-Estado-Inegi'].map(str) + pigoo_inegi_df['Clave-Municipio-Inegi']
# Classification according to the Sistema Urbano Nacional (SUN)
variables_SUN = ['CVE_MUN', 'NOM_MUN', 'CVE_SUN', 'NOM_SUN', 'TIPO_SUN', 'NOM_ENT']
pigoo_sun = asignar_sun(pigoo_inegi_df, vars=variables_SUN)
Columnas = ['Nombre- PIGOO', 'CVE_MUN', 'NOM_MUN', 'CVE_SUN', 'NOM_SUN', 'TIPO_SUN', 'NOM_ENT', 'Organismo Operador', 'Siglas', 'Loc-Inegi']
pigoo_sun[Columnas].head()
sun = getsun()
sun.head()
pigoo_inegi_df.loc[42]
writer = pd.ExcelWriter(r'D:\PCCS\01_Dmine\Datasets\Pigoo\pigoo_notreviewd.xlsx')
pigoo_inegi_df[~ciudades_revisadas].to_excel(writer, sheet_name ='datos')
writer.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Initial review and standardization of the PIGOO dataset
Step2: Because this dataset already contains geostatistical key labels, the records can in turn be classified according to the Sistema Urbano Nacional. The classification uses a previously built routine that identifies the 5-digit municipal geostatistical key ("CVE_MUN") in each row and assigns the identified municipality the Sistema Urbano Nacional key ("CVE_SUN") it belongs to.
Step3: 2. Ciudad de México
Step4: In the PIGOO dataset, Ciudad de México is treated as a single monolithic unit
|
13,988
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import scipy.stats as sp
# %matplotlib notebook
%matplotlib inline
import seaborn as sns; sns; sns.set_style('dark')
import statsmodels.api as sm
import matplotlib.pyplot as plt
df = pd.read_csv('turnstile_data_master_with_weather.csv')
df.index = pd.to_datetime(df.pop('DATEn') +' '+ df.pop('TIMEn'))
df.sort_index(inplace=True)
del df['Unnamed: 0']
df.head()
df.groupby('rain',as_index=False).ENTRIESn_hourly.mean()
sp.mannwhitneyu(df.ix[df.rain==0,'ENTRIESn_hourly'],
df.ix[df.rain==1,'ENTRIESn_hourly'])
length = df.shape[0]
subset = df.take(np.random.permutation(length)[:int(length*0.1)]).reset_index()
dummy_hours = pd.get_dummies(subset['Hour'], prefix='hour')
dummy_units = pd.get_dummies(subset['UNIT'], prefix='unit')
# features = subset.join(dummy_units).join(dummy_hours)
features = subset
banned = ['ENTRIESn_hourly','UNIT','Hour','DESCn','EXITSn_hourly','index']
candidates = [e for e in features.columns if e not in banned]
def test_adjusted_R_squared(col):
    """Testing one variable with already approved predictors"""
reg = sm.OLS(features['ENTRIESn_hourly'],features[predictors + [col]])
result = reg.fit()
return result.rsquared_adj
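# For reference, a minimal sketch of the adjusted R-squared being maximized above
# (n = number of observations, p = number of predictors; statsmodels adjusts the
# degrees of freedom slightly differently when the model has no constant term):
def adjusted_r_squared(r_squared, n, p):
    # Penalize R-squared for every additional predictor.
    return 1 - (1 - r_squared) * (n - 1) / (n - p - 1)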
predictors = []
topr2 = 0
for i in xrange(len(candidates)):
filtered = filter(lambda x: x not in predictors, candidates)
list_r2 = map(test_adjusted_R_squared,filtered)
highest,curr_topr2 = max(zip(filtered,list_r2),key=lambda x: x[1])
if curr_topr2 > topr2:
topr2 = round(curr_topr2,10)
else:
print("Adjusted R Squared can't go any higher. Stopping")
break
predictors.append(highest)
print('Step {}: Adjusted R-squared = {} + {}'.format(i,topr2,highest))
predictors
print('Scatter Matrix of features and predictors to test collinearity');
pd.scatter_matrix(features[numerics],figsize=(10,10));
features_dummy = features[predictors].join(dummy_units).join(dummy_hours)
model = sm.OLS(features['ENTRIESn_hourly'],features_dummy).fit()
filter_cols = lambda col: not col.startswith('unit') and not col.startswith('hour')
model.params[model.params.index.map(filter_cols)]
model.rsquared
fig,axes = plt.subplots(nrows=1,ncols=2,sharex=True,sharey=True,squeeze=False)
filtered = df.ix[df.ENTRIESn_hourly < 10000]
for i in xrange(1):
axes[0][i].set_xlabel('Number of ridership hourly')
axes[0][i].set_ylabel('Frequency')
filtered.ix[filtered.rain == 0,'ENTRIESn_hourly'].hist(ax=axes[0][0],bins=50)
axes[0][0].set_title('Non-rainy days')
filtered.ix[filtered.rain == 1,'ENTRIESn_hourly'].hist(ax=axes[0][1],bins=50)
axes[0][1].set_title('Rainy days')
fig.set_size_inches((15,5))
(df
.resample('1D',how='mean')
.groupby(lambda x : 1 if pd.datetools.isBusinessDay(x) else 0)
.ENTRIESn_hourly
.plot(legend=True))
plt.legend(['Not Business Day', 'Business Day'])
plt.xlabel('By day in May 2011')
plt.ylabel('Average number of ridership hourly')
plt.title('Average number of ridership every day at in May 2011');
df['BusinessDay'] = df.index.map(lambda x : 0 if pd.datetools.isBusinessDay(x) else 1)
df.resample('1D').rain.value_counts()
fig,axes = plt.subplots(nrows=1,ncols=3,sharey=True,squeeze=False)
numerics = ['maxpressurei', 'mintempi', 'precipi']
for i in xrange(len(numerics)):
axes[0][i].scatter(x=features[numerics[i]],y=model.resid,alpha=0.1)
axes[0][i].set_xlabel(numerics[i])
axes[0][0].set_ylabel('final model residuals')
axes[0][1].set_title('linear relationships between features and residual, alpha 0.1')
fig.set_size_inches(12,5);
fig,axes = plt.subplots(nrows=1,ncols=2,squeeze=False)
sp.probplot(model.resid,plot=axes[0][0])
model.resid.hist(bins=20,ax=axes[0][1]);
axes[0][1].set_title('Histogram of residuals')
axes[0][1].set_xlabel('Residuals')
axes[0][1].set_ylabel('Frequency');
fig,axes = plt.subplots(nrows=1,ncols=2,squeeze=False)
axes[0][0].scatter(x=model.fittedvalues, y=model.resid, alpha=0.1)
axes[0][1].scatter(x=model.fittedvalues, y=abs(model.resid), alpha=0.1);
axes[0][0].set_xlabel('fitted_values')
axes[0][1].set_xlabel('fitted_values')
axes[0][0].set_ylabel('Abs(residuals)')
axes[0][1].set_ylabel('residuals');
fig.set_size_inches(13,5)
resids = pd.DataFrame(model.resid.copy())
resids.columns = ['residuals']
resids.index = pd.to_datetime(features['index'])
resids.sort_index(inplace=True)
plt.plot_date(x=resids.resample('1H',how='mean').index,
y=resids.resample('1H',how='mean').residuals);
plt.xlabel('Time Series')
plt.ylabel('residuals')
plt.title('Residuals Variability across time');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: References
Step2: In this data, we can see summary statistics of hourly ridership, represented by the ENTRIESn_hourly variable, for rainy days versus non-rainy days. The independent variable is rain: non-rainy days form the control group and rainy days the experiment group. We want to know how rainy days affect ridership, so the dependent variable is ENTRIESn_hourly.
Step3: We use the Mann-Whitney U test, with an average of 1090 hourly riders on non-rainy days and 1105 on rainy days. Because the p-value of 0.025 is less than the 0.05 p-critical value, we reject the null hypothesis and conclude that the data provide convincing evidence that average hourly ridership on rainy days is higher than on non-rainy days.
Step5: R-squared alone is not a sufficient measure for evaluating our model, since it keeps increasing every time we add a variable. We use adjusted R-squared instead, since it incorporates a penalty each time a variable is added.
Step6: I choose forward selection, adding one variable at a time based on the highest adjusted R-squared, and stop adding variables once there is no further increase over the previous adjusted R-squared.
Step7: These are the non-dummy features after performing forward selection
Step8: To test for collinearity among the numerical features, I use a scatter matrix.
Step9: I can see that there is no collinearity among the predictors.
Step10: R2 is often interpreted as the proportion of response variation "explained" by the regressors in the model. So we can say 61.67% of the variability in hourly subway ridership can be explained by the model.
Step11: In this plot, we can see more people riding the subway, but we want to know whether the difference is significant using a hypothesis test. The frequency is indeed higher for non-rainy days compared to rainy days.
Step12: We can see that the difference in ridership by type of day is likely significant. We can create a new variable to turn this into a categorical variable.
Step13: Conclusion
Step14: We see that even though maxpressurei and mintempi look categorical, their residuals are randomly scattered. But precipi is not a good candidate for a linear relationship in the model; it does not appear to be randomly scattered.
Step15: Next, we check with a histogram whether the residuals are normally distributed. The histogram shows that they are fairly normal and centered around zero. The quantile plot checks whether the residuals follow a normal distribution. We can see that our model fails this test: the residuals are very skewed, as shown by the large number of points deviating from the reference line in the tail areas. This means that linear regression is not a good model for this case.
Step16: The model also fails this diagnostic. In the first plot, the fitted values and residuals should be randomly scattered around zero without forming a fan shape. In the plot on the left, we see a boundary that prevents the points from being randomly scattered, and a fan shape is present. This could mean there are other explanatory variables we have not yet found. Some fan shape also occurs in the plot on the right with the absolute value of the residuals.
|
13,989
|
<ASSISTANT_TASK:>
Python Code:
# For use in Quantopian Research, exploring interactively
from quantopian.interactive.data.quandl import cboe_vxxle as dataset
# import data operations
from odo import odo
# import other libraries we will use
import pandas as pd
# Let's use blaze to understand the data a bit using Blaze dshape()
dataset.dshape
# And how many rows are there?
# N.B. we're using a Blaze function to do this, not len()
dataset.count()
# Let's see what the data looks like. We'll grab the first three rows.
dataset[:3]
# Plotting this DataFrame
df = odo(dataset, pd.DataFrame)
df.head(5)
# So we can plot it, we'll set the index as the `asof_date`
df['asof_date'] = pd.to_datetime(df['asof_date'])
df = df.set_index(['asof_date'])
df.head(5)
import matplotlib.pyplot as plt
df['open_'].plot(label=str(dataset))
plt.ylabel(str(dataset))
plt.legend()
plt.title("Graphing %s since %s" % (str(dataset), min(df.index)))
# Import necessary Pipeline modules
from quantopian.pipeline import Pipeline
from quantopian.research import run_pipeline
from quantopian.pipeline.factors import AverageDollarVolume
# Import the datasets available
from quantopian.pipeline.data.quandl import cboe_vxxle
print "Here are the list of available fields per dataset:"
print "---------------------------------------------------\n"
def _print_fields(dataset):
print "Dataset: %s\n" % dataset.__name__
print "Fields:"
for field in list(dataset.columns):
print "%s - %s" % (field.name, field.dtype)
print "\n"
_print_fields(cboe_vxxle)
print "---------------------------------------------------\n"
pipe = Pipeline()
pipe.add(cboe_vxxle.open_.latest, 'open_vxxle')
# Setting some basic liquidity strings (just for good habit)
dollar_volume = AverageDollarVolume(window_length=20)
top_1000_most_liquid = dollar_volume.rank(ascending=False) < 1000
pipe.set_screen(top_1000_most_liquid & cboe_vxxle.open_.latest.notnan())
# The show_graph() method of pipeline objects produces a graph to show how it is being calculated.
pipe.show_graph(format='png')
# run_pipeline will show the output of your pipeline
pipe_output = run_pipeline(pipe, start_date='2013-11-01', end_date='2013-11-25')
pipe_output
# This section is only importable in the backtester
from quantopian.algorithm import attach_pipeline, pipeline_output
# General pipeline imports
from quantopian.pipeline import Pipeline
from quantopian.pipeline.factors import AverageDollarVolume
# For use in your algorithms via the pipeline API
from quantopian.pipeline.data.quandl import cboe_vxxle
def make_pipeline():
# Create our pipeline
pipe = Pipeline()
# Screen out penny stocks and low liquidity securities.
dollar_volume = AverageDollarVolume(window_length=20)
is_liquid = dollar_volume.rank(ascending=False) < 1000
# Create the mask that we will use for our percentile methods.
base_universe = (is_liquid)
# Add the datasets available
pipe.add(cboe_vxxle.open_.latest, 'vxxle_open')
# Set our pipeline screens
pipe.set_screen(is_liquid)
return pipe
def initialize(context):
attach_pipeline(make_pipeline(), "pipeline")
def before_trading_start(context, data):
results = pipeline_output('pipeline')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's go over the columns
Step2: <a id='pipeline'></a>
Step3: Now that we've imported the data, let's take a look at which fields are available for each dataset.
Step4: Now that we know what fields we have access to, let's see what this data looks like when we run it through Pipeline.
Step5: Here, you'll notice that each security is mapped to the corresponding value, so you could grab any security to get what you need.
|
13,990
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_iris
iris = load_iris()
print(iris.data.shape)
measurements = [
{'city': 'Dubai', 'temperature': 33.},
{'city': 'London', 'temperature': 12.},
{'city': 'San Francisco', 'temperature': 18.}
]
from sklearn.feature_extraction import DictVectorizer
vec = DictVectorizer()
vec
vec.fit_transform(measurements).toarray()
vec.get_feature_names()
import os
import pandas as pd
titanic = pd.read_csv(os.path.join('datasets', 'titanic3.csv'))
print(titanic.columns)
titanic.head()
labels = titanic.survived.values
features = titanic[['pclass', 'sex', 'age', 'sibsp', 'parch', 'fare', 'embarked']]
features.head()
pd.get_dummies(features).head()
features_dummies = pd.get_dummies(features, columns=['pclass', 'sex', 'embarked'])
features_dummies.head(n=16)
# We could also do this with DictVectorizer
from sklearn.feature_extraction import DictVectorizer
diccionario = features.to_dict('records')
vec = DictVectorizer()
dataset = vec.fit_transform(diccionario)
print(dataset.todense())
data = features_dummies.values
# Check for missing values; we will need to apply an imputer
import numpy as np
np.isnan(data).any()
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
train_data, test_data, train_labels, test_labels = train_test_split(
data, labels, random_state=0)
imp = SimpleImputer()
imp.fit(train_data)
train_data_finite = imp.transform(train_data)
test_data_finite = imp.transform(test_data)
np.isnan(train_data_finite).any()
from sklearn.dummy import DummyClassifier
clf = DummyClassifier(strategy='most_frequent')
clf.fit(train_data_finite, train_labels)
print("Accuracy: %f"
% clf.score(test_data_finite, test_labels))
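# A minimal follow-up sketch (not part of the original notebook): compare the
# most-frequent baseline above with a simple logistic regression on the same
# imputed data. Hyperparameters are left at their defaults on purpose.
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(train_data_finite, train_labels)
print("LogisticRegression accuracy: %f" % lr.score(test_data_finite, test_labels))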
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The features are
Step2: Derived features
Step3: Here is a description of what each of the variables means
Step4: We can directly discard the "boat" and "body" columns since they are directly related to whether the passenger survived. The name is (probably) unique to each person and therefore not informative. We will first try using "pclass", "sibsp", "parch", "fare" and "embarked" as features
Step5: In principle, the data now only contain useful features, but they are not in a format that machine learning algorithms can understand. We need to transform the strings "male" and "female" into binary variables that indicate gender, and the same for embarked. We can do this using pandas' get_dummies function
Step6: This transformation has correctly encoded the string columns. However, the pclass variable also appears to be categorical. We can explicitly list the variables we want to encode using the columns parameter to include pclass
Step7: Once we have done the hard work of loading the data, evaluating a classifier on it is straightforward. Let's see what performance we get with the simplest classifier, DummyClassifier('most_frequent'), which is equivalent to ZeroR.
|
13,991
|
<ASSISTANT_TASK:>
Python Code:
from Registry import Registry
from Registry.RegistryParse import ParseException
path_to_reg_hive = '../data/system' # The included SYSTEM hive file
hive = Registry.Registry(path_to_reg_hive)
print(type(hive))
print("Hive Name: ", hive.hive_name())
print("Hive Type: ", hive.hive_type())
# We can also open the hive directly to a key
select_key = hive.open('Select')
print("Select key path: ", select_key.path())
root = hive.root()
print(type(root))
print("Root name: ", root.name())
print("Root last written: ", root.timestamp())
print("Root path: ", root.path())
print("Root # of subkeys: ", root.subkeys_number())
print("Root # of values: ", root.values_number())
def iter_keys(iter_key):
if iter_key.subkeys_number() != 0: # We can repeat this for values
key_names = [x.name() for x in iter_key.subkeys()]
print("Subkey names: {}".format(", ".join(key_names)))
iter_keys(root)
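# A small recursive sketch (it only relies on path(), subkeys_number() and
# subkeys(), all demonstrated above) that walks every key under a starting
# point, capped at max_depth levels to keep the output manageable.
def walk_keys(key, depth=0, max_depth=2):
    # Print the key path, then recurse into its subkeys.
    print("  " * depth + key.path())
    if depth < max_depth and key.subkeys_number() != 0:
        for subkey in key.subkeys():
            walk_keys(subkey, depth + 1, max_depth)
walk_keys(root)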
try:
root_parent = root.parent()
except (ParseException, Registry.RegistryKeyHasNoParentException):
print("Parent of {} not available".format(root.name()))
# .find_key() allows us to get a specific value
# using the full key's path. Starts at root
# of the hive, no leading slash
key = root.find_key('Select')
print("Key name: ", key.name())
print("Key last written: ", key.timestamp())
print("Key path: ", key.path())
print("Key # of subkeys: ", key.subkeys_number())
print("Key # of values: ", key.values_number())
# We can access values by name
print("Last known good control set: ",
key.value('LastKnownGood').value())
# We can iterate over values
def iter_values(iter_key):
if iter_key.values_number() != 0:
value_names = {x.name(): x.value() for x in iter_key.values()}
print("{} key values:".format(iter_key.path()))
for name, value in value_names.items():
print("\t{}: {}".format(name, value))
iter_values(key)
key = root.find_key("Select")
ccs = key.value("current")
print("Select Value Name: ", ccs.name())
print("Current Control Set Value: ", ccs.value())
print("Value type (int): ", ccs.value_type())
print("Value type (str): ", ccs.value_type_str())
# Lets use a more complex key, now that we know the current control set
key = root.find_key(r"ControlSet{:03d}\Enum\USBSTOR".format(ccs.value()))
if key.subkeys_number() != 0:
# We will use the first device as an example
device0 = key.subkeys()[0]
print("Device: ", device0.name().replace("&", " "))
if device0.subkeys_number() != 0:
# Let's display the first device's UID & Friendly name as an example
uid0 = device0.subkeys()[0]
print("\tUID: ", uid0.name())
friendly_name = uid0.value('FriendlyName')
print("\tFriendly Name ({}): {}".format(
friendly_name.value_type_str(), friendly_name.value()))
path_to_reg_hive = '../data/system'
hive = Registry.Registry(path_to_reg_hive)
select = hive.open('Select')
# Open both
ccs = hive.open(r'ControlSet{:03d}\Enum\USB'.format(select.value("Current").value()))
lastknown = hive.open(r'ControlSet{:03d}\Enum\USB'.format(select.value("LastKnownGood").value()))
# initalize variables
ccs_dict = dict()
lastknown_dict = dict()
usb_diff = dict()
val_diff = dict()
# Collect data from ControlSet001
for sub in ccs.subkeys():
l = list()
for val in sub.subkeys():
# Get serial numbers of USBS
l.append(val.name())
ccs_dict[sub.name()] = l
# Collect data from ControlSet003
for sub in lastknown.subkeys():
l = list()
for val in sub.subkeys():
# Get serial numbers of USBS
l.append(val.name())
lastknown_dict[sub.name()] = l
# Perform comparison
for usb1 in ccs_dict.keys():
if usb1 not in lastknown_dict.keys():
usb_diff[usb1] = ccs_dict[usb1]
# Print output
import pprint
print("======== Different USBs ========")
pprint.pprint(usb_diff)
print("======== ControlSet{:03d} - CurrentControlSet - All USBs =========".format(select.value("Current").value()))
pprint.pprint(ccs_dict)
print("======== ControlSet{:03d} - LastKnownGood - All USBs =========".format(select.value("LastKnownGood").value()))
pprint.pprint(lastknown_dict)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Root Object
Step2: Key Objects
Step3: Value Objects
Step4: The System Hive Sandbox
|
13,992
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import missingno as msno
from matplotlib import pyplot as plt
stats2015 = pd.read_csv("the-counted-revised-2015.csv")
stats2016 = pd.read_csv("the-counted-revised-2016.csv")
msno.bar(stats2015)
msno.bar(stats2016)
stats2015.head()
stats2016.head()
#Dropping the extra axis
stats2015 = stats2015.drop(['Unnamed: 0'],axis=1)
stats2016 = stats2016.drop(['Unnamed: 0'],axis=1)
# Using the count methods to obtain the frequency of data
armed2015 = stats2015.groupby('armed')['index'].count()
armed2016 = stats2016.groupby('armed')['index'].count()
print('2015',armed2015,'\n')
print('2016',armed2016)
race2015 = stats2015.groupby('raceethnicity')['index'].count()
race2016 = stats2016.groupby('raceethnicity')['index'].count()
print('2015',race2015,'\n')
print('2016',race2016)
how2015 = stats2015.groupby('classification')['index'].count()
how2016 = stats2016.groupby('classification')['index'].count()
print('2015',how2015,'\n')
print('2016',how2016)
stats2015[stats2015['raceethnicity'] == 'Other']
stats2015.ix[782,'raceethnicity'] = 'Unknown'
armed2015.to_json('armed2015.json')
race2015.to_json('race2015.json')
how2015.to_json('how2015.json')
armed2016.to_json('armed2016.json')
race2016.to_json('race2016.json')
how2016.to_json('how2016.json')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Missing Data
Step2: 'streetaddress' seems to be the only column that is missing data. That is good news as we can count any of the other columns
Step3: I noticed that there was only one "other" column for raceethnicity so I decided to explore.
Step4: Looking through the database's sources, there seems to be no evidence that Mohamed Ibrahim is qualified for the race
|
13,993
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
data = sm.datasets.spector.load_pandas()
exog = data.exog
endog = data.endog
print(sm.datasets.spector.NOTE)
print(data.exog.head())
exog = sm.add_constant(exog, prepend=True)
class MyProbit(GenericLikelihoodModel):
def loglike(self, params):
exog = self.exog
endog = self.endog
q = 2 * endog - 1
return stats.norm.logcdf(q*np.dot(exog, params)).sum()
sm_probit_manual = MyProbit(endog, exog).fit()
print(sm_probit_manual.summary())
sm_probit_canned = sm.Probit(endog, exog).fit()
print(sm_probit_canned.params)
print(sm_probit_manual.params)
print(sm_probit_canned.cov_params())
print(sm_probit_manual.cov_params())
import numpy as np
from scipy.stats import nbinom
def _ll_nb2(y, X, beta, alph):
mu = np.exp(np.dot(X, beta))
size = 1/alph
prob = size/(size+mu)
ll = nbinom.logpmf(y, size, prob)
return ll
from statsmodels.base.model import GenericLikelihoodModel
class NBin(GenericLikelihoodModel):
def __init__(self, endog, exog, **kwds):
super(NBin, self).__init__(endog, exog, **kwds)
def nloglikeobs(self, params):
alph = params[-1]
beta = params[:-1]
ll = _ll_nb2(self.endog, self.exog, beta, alph)
return -ll
def fit(self, start_params=None, maxiter=10000, maxfun=5000, **kwds):
# we have one additional parameter and we need to add it for summary
self.exog_names.append('alpha')
        if start_params is None:
# Reasonable starting values
start_params = np.append(np.zeros(self.exog.shape[1]), .5)
# intercept
start_params[-2] = np.log(self.endog.mean())
return super(NBin, self).fit(start_params=start_params,
maxiter=maxiter, maxfun=maxfun,
**kwds)
import statsmodels.api as sm
medpar = sm.datasets.get_rdataset("medpar", "COUNT", cache=True).data
medpar.head()
y = medpar.los
X = medpar[["type2", "type3", "hmo", "white"]].copy()
X["constant"] = 1
mod = NBin(y, X)
res = mod.fit()
print('Parameters: ', res.params)
print('Standard errors: ', res.bse)
print('P-values: ', res.pvalues)
print('AIC: ', res.aic)
print(res.summary())
res_nbin = sm.NegativeBinomial(y, X).fit(disp=0)
print(res_nbin.summary())
print(res_nbin.params)
print(res_nbin.bse)
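# A quick sanity check (a sketch; the tolerance values are arbitrary choices):
# the custom NBin estimates should closely match the canned NegativeBinomial.
print(np.allclose(res.params, res_nbin.params, atol=1e-2))
print(np.allclose(res.bse, res_nbin.bse, atol=1e-2))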
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Spector dataset is distributed with statsmodels. You can access a vector of values for the dependent variable (endog) and a matrix of regressors (exog) like this
Step2: Them, we add a constant to the matrix of regressors
Step3: To create your own Likelihood Model, you simply need to overwrite the loglike method.
Step4: Estimate the model and print a summary
Step5: Compare your Probit implementation to statsmodels' "canned" implementation
Step6: Notice that the GenericMaximumLikelihood class provides automatic differentiation, so we didn't have to provide Hessian or Score functions in order to calculate the covariance estimates.
Step7: New Model Class
Step8: Two important things to notice
Step9: The model we are interested in has a vector of non-negative integers as
Step10: Then, we fit the model and extract some information
Step11: Extract parameter estimates, standard errors, p-values, AIC, etc.
Step12: As usual, you can obtain a full list of available information by typing
Step13: Testing
|
13,994
|
<ASSISTANT_TASK:>
Python Code:
from reprophylo import *
coi = Locus(char_type='dna',
feature_type='CDS',
name='MT-CO1',
aliases=['cox1', 'coi'])
print coi
list_loci_in_genbank('data/Tetillidae.gb', # The input genbank
# file
'data/loci.csv', # The loci file
'outputs/loci_counts.txt') # Additional
# output,
# discussed
# below.
ssu = Locus('dna','rRNA','18S',['ssu','SSU-rRNA'])
loci_list = [coi, ssu]
pj = Project('data/edited_loci.csv',
pickle='outputs/my_project.pkpj', git=False)
print pj
# Update the pickle file
pickle_pj(pj, 'outputs/my_project.pkpj')
# A Locus object
coi = Locus(char_type='dna', # or 'prot'
feature_type='CDS', # any string
name='MT-CO1', # any string
aliases=['coi', 'cox1']) # list of strings
# Guess loci.csv file from a genbank file
list_loci_in_genbank('genbank.gb',
'loci.csv',
'loci_counts.txt')
# Start a Project
# With a Locus object list
pj = Project([coi, ssu], pickle='pickle_filename')
# With a loci.csv file
pj = Project('loci.csv', pickle='pickle_filename')
# Add a Locus to an existing Project
pj.loci.append(coi)
#Or
pj.loci += [coi]
# Modify a Locus existing in a Project
for l in pj.loci:
if l.name == 'MT-CO1':
l.name = 'newName'
l.feature_type = 'newFeatureType'
l.char_type = 'prot'
l.aliases.append('newAlias')
#Or
l.aliases += ['newAlias1,newAlias2']
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Once this is done, we can start a Project. A Project contains all the data, metadata, methods and environment information, and it is the unit that is saved as a pickle file, which is version controlled with <a href=http
Step2: This is a single Locus description (a Locus object). We can confirm its content by printing it like this
Step3: Describing loci using a file
Step4: The command generated the loci file and wrote it in data/loci.csv. Here are some excerpts separated by three dots
Step5: Regardless of whether we have one or more Locus objects, they are read as a list, which means that they are wrapped with square brackets and separated by comma
Step6: This command will start the Project and will write it to the pickle file outputs/dummy.pkpj
Step7: This will provoke a bunch of Git related messages which will be discussed in the version control section of this tutorial.
Step8: 3.2.3 Modifying the loci of an existing Project
Step9: 3.2.4 Quick reference
|
13,995
|
<ASSISTANT_TASK:>
Python Code:
import random
import numpy as np
from matplotlib.colors import hsv_to_rgb
from PIL import Image as PILImage, ImageDraw as PILImageDraw
%load_ext watermark
%watermark
%watermark -a "Lilian Besson (Naereen)" -p numpy,matplotlib,PIL
def identicon(hashval=None,
size=256,
margin=0.11,
foreground=(255,0,0,255),
background=(240,240,240,255),
saturation=0.7,
brightness=0.5,
xsym=True,
):
if hashval is not None:
if isinstance(hashval, str):
hashval = abs(hash(hashval))
random.seed(int(hashval))
hue = random.random()
foreground = tuple(np.array(hsv_to_rgb([hue, saturation, brightness]) * 256, dtype=int))
border = int(size * margin)
ampl = size - 2 * border
def cropfirst(x):
return max(border, min(border + x, size - border))
def cropsecond(x):
return max(border, min(size - border - x, size - border))
# make a blank image
im = PILImage.new('RGBA', (size,size), background)
# get a drawing context
d = PILImageDraw.Draw(im)
def rect(xy):
x1, y1, x2, y2 = xy
r1 = (cropfirst(x1), cropfirst(y1), cropfirst(x2), cropfirst(y2))
if xsym:
r2 = (cropsecond(x1), cropfirst(y1), cropsecond(x2), cropfirst(y2))
else:
r2 = (cropfirst(x1), cropsecond(y1), cropfirst(x2), cropsecond(y2))
d.rectangle(r1, fill=foreground)
d.rectangle(r2, fill=foreground)
nbrectangle = random.randint(4, 8)
for i in range(nbrectangle):
x1, y1 = [random.randint(0, ampl) for _ in range(2)]
dx, dy = [random.randint(ampl // 8, ampl // 2) for _ in range(2)]
dirx, diry = [random.choice([+1, -1]) for _ in range(2)]
rect((x1, y1, x1 + dirx * dx, y1 + diry * dy))
return im
im = identicon()
im
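# A small usage sketch: the PIL image can be written straight to disk
# (the filename here is just an example).
identicon("example-user").save("identicon_example.png")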
from itertools import product
def identicon2(hashval=None,
size=256,
nbsquares=5,
margin=0.09,
foreground=(255,0,0,255),
background=(240,240,240,255),
saturation=0.7,
brightness=0.5,
xsym=True,
):
if hashval is not None:
if isinstance(hashval, str):
hashval = abs(hash(hashval))
        # Parenthesize the shift: % binds tighter than <<, so without the
        # parentheses the seed would always collapse to 0 and every hash
        # would produce the same image.
        random.seed(int(hashval) % (1 << 28))
        np.random.seed(int(hashval) % (1 << 28))
hue = random.random()
foreground = tuple(np.array(hsv_to_rgb([hue, saturation, brightness]) * 256, dtype=int))
border = int(size * margin)
ampl = size - 2 * border
size_square = ampl // nbsquares
def cropfirst(x):
return max(border, min(border + x, size - border))
def cropsecond(x):
return max(border, min(size - border - x, size - border))
# make a blank image
im = PILImage.new('RGBA', (size,size), background)
# get a drawing context
d = PILImageDraw.Draw(im)
def rect(xy, xsym=xsym):
x1, y1, x2, y2 = xy
r1 = (cropfirst(x1), cropfirst(y1), cropfirst(x2), cropfirst(y2))
if xsym:
r2 = (cropsecond(x1), cropfirst(y1), cropsecond(x2), cropfirst(y2))
else:
r2 = (cropfirst(x1), cropsecond(y1), cropfirst(x2), cropsecond(y2))
d.rectangle(r1, fill=foreground)
d.rectangle(r2, fill=foreground)
nbrectangle = random.randint(5, nbsquares * (nbsquares // 2 + 1) - 2)
possible_coordinates = [(x, y) for (x, y) in product(range(nbsquares), range(nbsquares // 2 + 1))]
for i in np.random.choice(len(possible_coordinates), size=nbrectangle, replace=False):
y, x = possible_coordinates[i]
rect((x * size_square, y * size_square, (x+1) * size_square, (y+1) * size_square))
return im
im = identicon2()
im
for _ in range(2):
im = identicon2("Naereen")
im
for _ in range(10):
im = identicon2(size=128)
im
mylist = [identicon2() for _ in range(10)]
import base64
from IPython.display import Image
from IPython.display import HTML
def tag_from_data(data, size='100%'):
return (
'''<img
style="display:inline;
width:{1};
max-width:400px;
padding:10px;
margin-top:14px"
src="data:image/png;base64,{0}"
/>
''').format(''.join(base64.encodebytes(data).decode().split('\n')), size)
import io
def im2bytes(im):
buffer = io.BytesIO()
im.save(buffer, format="png")
return buffer.getvalue()
HTML(tag_from_data(im2bytes(im)))
class VignetteList:
def __init__(self, *images, size=None):
self.images = images
self.size = size
def _repr_html_(self):
return '<p>'+','.join(tag_from_data(im2bytes(im), self.size) for im in self.images)+'</p>'
VignetteList(*mylist, size='200px')
def tag_from_data_II(data, size='100%'):
return '''<img
style="
display:inline;
width:{1};
max-width:400px;
padding:10px;
margin-top:14px"
onMouseOver="this.style['box-shadow']='5px 5px 30px 0px rgba(163,163,163,1)'"
onMouseOut="this.style['box-shadow']=''"
src="data:image/png;base64,{0}"
/>'''.format(''.join(base64.encodebytes(data).decode().split('\n')), size)
def html_list_formatter(ll):
html = get_ipython().display_formatter.formatters['text/html']
reps = []
for o in ll:
if isinstance(o, Image):
reps.append(tag_from_data_II(o.data, '200px') )
        elif isinstance(o, PILImage.Image):
reps.append(tag_from_data_II(im2bytes(o), '200px') )
else:
h = html(o)
if h:
reps.append(h)
else:
reps.append(repr(o)+'')
return '<span>['+','.join(reps)+']</span>'
ipython = get_ipython()
html = ipython.display_formatter.formatters['text/html']
html.for_type(list, html_list_formatter)
mylist
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First try
Step2: Second try
Step3: Tests
Step4: And every parameter can be changed and tuned.
Step5: List of vignettes
|
13,996
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
# Load Part-of-Speech data
from lxmls.readers.pos_corpus import PostagCorpusData
data = PostagCorpusData()
from lxmls.deep_learning.numpy_models.rnn import NumpyRNN
numpy_model = NumpyRNN(
input_size=data.input_size,
embedding_size=50,
hidden_size=20,
output_size=data.output_size,
learning_rate=0.1
)
from lxmls.deep_learning.pytorch_models.rnn import PytorchRNN
model = PytorchRNN(
input_size=data.input_size,
embedding_size=50,
hidden_size=20,
output_size=data.output_size,
learning_rate=0.1
)
# Get gradients for both models
batch = data.batches('train', batch_size=1)[0]
gradient_numpy = numpy_model.backpropagation(batch['input'], batch['output'])
gradient = model.backpropagation(batch['input'], batch['output'])
gradient[0].shape, gradient_numpy[0].shape
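# A quick numerical check (a sketch; the tolerance is an arbitrary assumption)
# that the numpy and Pytorch embedding gradients agree.
import numpy as np
print(np.allclose(gradient_numpy[0], gradient[0].numpy(), atol=1e-5))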
%matplotlib inline
import matplotlib.pyplot as plt
# Gradient for word embeddings in the example
plt.subplot(2,2,1)
plt.imshow(gradient_numpy[0][batch['input'], :], aspect='auto', interpolation='nearest')
plt.colorbar()
plt.subplot(2,2,2)
plt.imshow(gradient[0].numpy()[batch['input'], :], aspect='auto', interpolation='nearest')
plt.colorbar()
# Gradient for word embeddings in the example
plt.subplot(2,2,3)
plt.imshow(gradient_numpy[1], aspect='auto', interpolation='nearest')
plt.colorbar()
plt.subplot(2,2,4)
plt.imshow(gradient[1].numpy(), aspect='auto', interpolation='nearest')
plt.colorbar()
plt.show()
# Alternative native CuDNN implementation of RNNs
from lxmls.deep_learning.pytorch_models.rnn import FastPytorchRNN
fast_model = FastPytorchRNN(
input_size=data.input_size,
embedding_size=50,
hidden_size=20,
output_size=data.output_size,
learning_rate=0.1
)
num_epochs = 10
# Select which model to train; swap in `fast_model` here to use the CuDNN-based RNN
model = model
import numpy as np
import time
# Get batch iterators for train and test
train_batches = data.batches('train', batch_size=1)
dev_set = data.batches('dev', batch_size=1)
test_set = data.batches('test', batch_size=1)
# Epoch loop
start = time.time()
for epoch in range(num_epochs):
# Batch loop
for batch in train_batches:
model.update(input=batch['input'], output=batch['output'])
# Evaluation dev
is_hit = []
for batch in dev_set:
is_hit.extend(model.predict(input=batch['input']) == batch['output'])
accuracy = 100*np.mean(is_hit)
# Inform user
print("Epoch %d: dev accuracy %2.2f %%" % (epoch+1, accuracy))
print("Training took %2.2f seconds per epoch" % ((time.time() - start)/num_epochs))
# Evaluation test
is_hit = []
for batch in test_set:
is_hit.extend(model.predict(input=batch['input']) == batch['output'])
accuracy = 100*np.mean(is_hit)
# Inform user
print("Test accuracy %2.2f %%" % accuracy)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Check Numpy and Pytorch Gradients match
Step2: To debug your code you can compare the numpy and Pytorch gradients using
Step3: and then plotting them with matplotlib
Step4: Train model
|
13,997
|
<ASSISTANT_TASK:>
Python Code:
!pip install graphspace_python==0.8.3
print("Hello World")
l = []
# l = list()
l = ['apple', 'orange', 123]
print(l)
print(l[0], l[1])
tup = ()
#tup = tuple()
tup3 = tuple([1,2,3])
print(tup3)
tup4 = tuple('Hello')
print(tup4)
data = {}
# data = dict()
data['firstname'] = 'Aditya'
data['lastname'] = 'Bharadwaj'
data['age'] = 25
print(data)
for i in [1,2,3,4,5]:
print(i)
import networkx as nx
G = nx.DiGraph()
# Add a node
G.add_node('a')
# Add multiple nodes
G.add_nodes_from(['b', 'c', 'd'])
G.nodes()
# Remove node from the graph
G.remove_node('d')
G.nodes()
# Add edges to the graph
G.add_edge('a', 'b')
G.add_edges_from([('b','c'), ('c', 'a')])
G.edges()
# Remove edge from the graph
G.remove_edge('c', 'a')
G.edges()
# Get Graph Info
print(nx.info(G))
%matplotlib inline
nx.draw(G, with_labels=True)
from graphspace_python.api.client import GraphSpace
graphspace = GraphSpace('user6@example.com', 'user6')
from graphspace_python.graphs.classes.gsgraph import GSGraph
graph = graphspace.post_graph(GSGraph(G))
print(graph.url)
# Update the name of the graph
graph.set_name('My First Graph')
graph = graphspace.update_graph(graph)
print(graph.url)
G = nx.DiGraph()
# add at creation
# nodes
G.add_node('a', favorite_color='yellow')
G.add_nodes_from([('b', {'favorite_color' : 'green'}),
('c', {'favorite_color' :'red'})])
# edges
G.add_edge('a', 'b', {'relationship' : 'friends'})
G.add_edge('b', 'c', {'relationship' : 'enemy'})
# accessing node attributes
print("Node 'a' attributes:", G.node['a'])
# accessing edge attributes
print("Edge a-b attributes:", G.edge['a']['b'])
label = {
'a' : 'A',
'b' : 'B',
'c' : 'C'
}
nx.set_node_attributes(G, 'label', label)
print("Node a's label is %s" % G.node['a']['label'])
graph = graphspace.update_graph(GSGraph(G), graph_id=graph.id)
print(graph.url)
graph.set_name('My First Graph')
# Add tags
graph.set_tags(['icsb2017', 'tutorial'])
# Add any number of attributes that best characterize the network
graph.set_data({
'author': 'Aditya Bharadwaj',
'contact_email': 'adb@vt.edu',
'description': "This graph was posted during ICSB 2017 workshop on GraphSpace"
})
graph = graphspace.update_graph(graph, graph_id=graph.id)
print(graph.url)
for n in graph.nodes():
graph.add_node_style(n, shape='rectangle', color=G.node[n]['favorite_color'], width=100, height=100)
graph = graphspace.update_graph(graph)
print(graph.url)
graph.add_edge_style('a', 'b', directed=False, edge_style='solid', width=10.0, color='blue')
graph.add_edge_style('b', 'c', directed=False, edge_style='dashed', width=10.0, color='red')
graph = graphspace.update_graph(graph)
print(graph.url)
graph.node['a']['popup'] = 'Node A'
graph.node['b']['popup'] = 'Node B'
graph.node['c']['popup'] = 'Node C'
graph['a']['b']['popup'] = 'Friends'
graph['b']['c']['popup'] = 'Enemy'
graph = graphspace.update_graph(graph)
print(graph.url)
# HTML formated popups
graph['a']['b']['popup'] = '<h3>Edge between A and B</h3> <br/> Relationship: Friends'
graph['b']['c']['popup'] = '<h3>Edge between B and C</h3> <br/> Relationship: Enemy'
graph.node['a']['popup'] = '<h3>Node A</h3> <br/> <img height="250" src="http://images.clipartpanda.com/boy-20clip-20art-blond-boy.png" alt="Image A" />'
graph.node['b']['popup'] = '<h3>Node B</h3> <br/> <img height="250" src="http://images.clipartpanda.com/boy-clipart-birthday-boy.png" alt="Image B" />'
graph.node['c']['popup'] = '<h3>Node C</h3> <br/> <img height="250" src="http://images.clipartpanda.com/boy-clipart-9a67af7554253b6a9b7014c36c348f09.jpg" alt="Image C" />'
graph = graphspace.update_graph(graph)
print(graph.url)
# You can also embed websites in popups as well. This could be useful for embedding sites like genecards.
graph['a']['b']['popup'] = '<h3>Edge between A and B</h3> <br/> Relationship: <a href="https://en.m.wikipedia.org/wiki/Friendship">Friends</a> <iframe src="https://en.m.wikipedia.org/wiki/Friendship" scrolling="auto" width="100%" height="500"></iframe>'
graph['b']['c']['popup'] = '<h3>Edge between B and C</h3> <br/> Relationship: <a href="https://en.m.wikipedia.org/wiki/Enemy">Enemy</a> <iframe src="https://en.m.wikipedia.org/wiki/Enemy" scrolling="auto" width="100%" height="500"></iframe>'
graph = graphspace.update_graph(graph)
print(graph.url)
# Position nodes in a vertical alignment.
graph.set_node_position('a', y=0, x=0)
graph.set_node_position('b', y=250, x=0)
graph.set_node_position('c', y=500, x=0)
graph = graphspace.update_graph(graph)
print(graph.url)
# G.add_edge('a', 'b', weight=5)
# G.add_edge('b', 'c', weight=10)
graph['a']['b']['weight'] = 5
graph['b']['c']['weight'] = 10
graph.edges(data=True)
graph = graphspace.update_graph(graph, graph_id=graph.id)
print(graph.url)
# Laying out edges with width propotional to edge weight.
graph.add_edge_style('a', 'b', directed=False, edge_style='solid', width=graph['a']['b']['weight'], color='blue')
graph.add_edge_style('b', 'c', directed=False, edge_style='dashed', width=graph['b']['c']['weight'], color='red')
graph = graphspace.update_graph(graph)
print(graph.url)
from graphspace_python.graphs.classes.gsgroup import GSGroup
group = graphspace.post_group(GSGroup(name='My first group', description='sample group'))
print(group.url)
# Initially a group is created with the group owner as a member.
for member in graphspace.get_group_members(group=group):
print(member.email)
# Group owner can add existing users by their GraphSpace usernames
for email in ['adb@vt.edu', 'adb@cs.vt.edu']:
graphspace.add_group_member(group=group, member_email=email)
# Getting the list of all group members
for member in graphspace.get_group_members(group=group):
print(member.email, member.id)
# Send the following invitation link to your collaborators (with/without GraphSpace accounts)
print(group.invite_link)
graphspace.delete_group_member(group=group, member_id=70)
for member in graphspace.get_group_members(group=group):
print(member.email)
graphspace.share_graph(graph=graph, group=group)
for shared_graph in graphspace.get_group_graphs(group=group):
print(shared_graph.owner_email, shared_graph.name)
graphspace.unshare_graph(graph=graph, group=group)
# No graph is shared with the group at this point.
for shared_graph in graphspace.get_group_graphs(group=group):
print(shared_graph.owner_email, shared_graph.name)
from graphspace_python.graphs.classes.gslayout import GSLayout
L = GSLayout()
# Assign different colors to nodes
L.add_node_style('a', shape='ellipse', color='yellow', width=100, height=100)
L.add_node_style('b', shape='triangle', color='green', width=100, height=100)
L.add_node_style('c', shape='rectangle', color='red', width=100, height=100)
L.add_edge_style('a', 'b', directed=False, edge_style='solid', width=5.0, color='blue')
L.add_edge_style('b', 'c', directed=False, edge_style='dashed', width=5.0, color='red')
L.set_name('My First Layout')
layout = graphspace.post_graph_layout(graph_id=graph.id, layout=L)
# Go to the following url o visualize the layout.
print(layout.url)
layout.set_node_position('a', y=0, x=0)
layout.set_node_position('b', y=0, x=250)
layout.set_node_position('c', y=0, x=500)
layout = graphspace.update_graph_layout(graph_id=graph.id, layout=layout)
# Go to the following url to visualize the layout.
print(layout.url)
# Setting font style
layout.add_node_style('a', attr_dict={'font-size':24, 'font-family': 'Lucida Console, Courier, monospace'}, shape='ellipse', color=graph.node['a']['favorite_color'], width=100, height=100)
layout.add_node_style('b', attr_dict={'font-size':24, 'font-family': 'Lucida Console, Courier, monospace'}, shape='triangle', color=graph.node['b']['favorite_color'], width=100, height=100)
layout.add_node_style('c', attr_dict={'font-size':24, 'font-family': 'Lucida Console, Courier, monospace'}, shape='rectangle', color=graph.node['c']['favorite_color'], width=100, height=100)
layout = graphspace.update_graph_layout(graph_id=graph.id, layout=layout)
print(layout.url)
# Setting image backgrounds
import pickle
tutorial_node_images = pickle.load( open( "data/tutorial_node_images.p", "rb" ) )
layout.add_node_style('a', attr_dict={
'background-image': tutorial_node_images['a'],
'background-clip': 'none',
'background-fit': 'contain',
'background-opacity': 0,
'border-opacity': 0,
'text-margin-y': 5
}, width=100, height=100)
layout.add_node_style('b', attr_dict={
'background-image': tutorial_node_images['b'],
'background-clip': 'none',
'background-fit': 'contain',
'background-opacity': 0,
'border-opacity': 0,
'text-margin-y': 5
}, width=100, height=100)
layout.add_node_style('c', attr_dict={
'background-image': tutorial_node_images['c'],
'background-clip': 'none',
'background-fit': 'contain',
'background-opacity': 0,
'border-opacity': 0,
'text-margin-y': 5
}, width=100, height=100)
layout = graphspace.update_graph_layout(graph_id=graph.id, layout=layout)
print(layout.url)
graph = graphspace.set_default_graph_layout(graph=graph, layout=layout)
# You should see the above layout by default for the given graph.
print(graph.url)
for mylayout in graphspace.get_my_graph_layouts(graph_id=graph.id):
print(mylayout.name, mylayout.id)
layouts = graphspace.get_shared_graph_layouts(graph=graph)
print(layouts)
layout.set_is_shared(1)
layout = graphspace.update_graph_layout(graph=graph, layout=layout)
print([l.name for l in graphspace.get_shared_graph_layouts(graph=graph)])
# Sharing the graph with everyone. This graph will show up in Public Graphs list.
graph = graphspace.publish_graph(graph=graph)
print(graph.url)
# Unpublishing graphs
graph = graphspace.unpublish_graph(graph_id=graph.id)
print(graph.url)
# Getting a list of public graphs with 'pathlinker' as a subtring in atleast one of their tags.
for g in graphspace.get_public_graphs(tags=['%pathlinker%'],limit=100, offset=0):
print(g.owner_email, g.name)
# Getting a list of my graphs.
for g in graphspace.get_my_graphs(limit=100, offset=0):
print(g.owner_email, g.name)
graphspace.delete_group(group=group)
graphspace.delete_graph(graph=graph)
## Map values to node color
g = nx.erdos_renyi_graph(10, 0.1, seed=10, directed=True)
g = GSGraph(nx.relabel_nodes(g, {n: str(n) for n in g.nodes()}))
nx.set_node_attributes(g, 'degree_centrality', nx.degree_centrality(g))
for n in g.nodes():
g.add_node_style(n, attr_dict={'background-opacity': g.node[n]['degree_centrality']}, color='blue', width=50, height=50)
g.set_name('Random Graph with degree centrality mapped to node color')
graph = graphspace.post_graph(g)
print(graph.url)
graphspace.delete_graph(graph=graph)
## Map values to node size
g = nx.erdos_renyi_graph(10, 0.1, seed=10, directed=True)
g = GSGraph(nx.relabel_nodes(g, {n: str(n) for n in g.nodes()}))
nx.set_node_attributes(g, 'degree_centrality', nx.degree_centrality(g))
for n in g.nodes():
g.add_node_style(n, color='blue', width=200*g.node[n]['degree_centrality']+20, height=200*g.node[n]['degree_centrality']+20)
g.set_name('Random Graph with degree centrality mapped to node size')
graph = graphspace.post_graph(g)
print(graph.url)
graphspace.delete_graph(graph=graph)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 2
Step2: Lists
Step3: In Python, indexing starts from 0. Thus the list l, which has three elements, has 'apple' at index 0, 'orange' at index 1, and 123 at index 2.
Step4: Tuple
Step5: Values can be assigned while declaring a tuple. It takes a list as input and converts it into a tuple or it takes a string and converts it into a tuple.
Step6: Dictionaries
Step7: Loops
Step8: Part 3
Step9: NetworkX with Matplotlib¶
Step10: Uploading your network to GraphSpace
Step11: Once you are connected, you can use this connection to post/upload your graphs to GraphSpace.
Step12: Updating your network on GraphSpace
Step13: Adding and Inspecting Attributes
Step14: Adding node labels
Step15: Adding graph information
Step16: Adding visual styles to nodes
Step17: Adding visual style to edges
Step18: Adding annotations to nodes and edges
Step19:
Step20: Laying out nodes
Step21: Specifying weights on edges
Step22: Note
Step23: Part 4
Step24: Adding and removing group members
Step25: Sharing graphs with the groups
Step26: Unsharing graphs
Step27: Part 5
Step28:
Step29: More examples
Step30: Set the layout as default
Step31: Sharing layouts
Step32: Similarily you can also get the list of shared layouts using get_shared_graph_layouts method.
Step33: Part 6
Step34: Part 7
Step35: Part 8
Step36: More examples
Step37:
Step38:
|
13,998
|
<ASSISTANT_TASK:>
Python Code:
import os,sys
import numpy
%matplotlib inline
import matplotlib.pyplot as plt
sys.path.insert(0,'../')
from utils.mkdesign import create_design_singlecondition
from nipy.modalities.fmri.hemodynamic_models import spm_hrf,compute_regressor
from utils.make_data import make_continuous_data
from utils.graph_utils import show_graph_from_adjmtx,show_graph_from_pattern
from statsmodels.tsa.arima_process import arma_generate_sample
import scipy.stats
from dcm_sim import sim_dcm_dataset
results_dir = os.path.abspath("../results")
if not os.path.exists(results_dir):
os.mkdir(results_dir)
_,data_conv,params=sim_dcm_dataset(verbose=True)
A_mtx=params['A']
B_mtx=params['B']
u=params['u']
# downsample design to 1 second TR
u=numpy.convolve(params['u'],spm_hrf(params['stepsize'],oversampling=1))
u=u[range(0,data_conv.shape[0],int(1./params['stepsize']))]
ntp=u.shape[0]
tetrad_dir='/home/vagrant/data/tetrad_files'
if not os.path.exists(tetrad_dir):
os.mkdir(tetrad_dir)
nfiles=10
for i in range(nfiles):
_,data_conv,params=sim_dcm_dataset()
# downsample to 1 second TR
data=data_conv[range(0,data_conv.shape[0],int(1./params['stepsize']))]
ntp=data.shape[0]
imagesdata=numpy.hstack((numpy.array(u)[:,numpy.newaxis],data))
numpy.savetxt(os.path.join(tetrad_dir,"data%03d.txt"%i),
imagesdata,delimiter='\t',
header='u\t0\t1\t2\t3\t4',comments='')
!bash run_images.sh
g=show_graph_from_pattern('images_test/test.pattern.dot')
show_graph_from_adjmtx(A_mtx,B_mtx,params['C'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the data generated using the DCM forward model. In this model, there is a significant static connectivity from 1->2 and 1->3 (A matrix), and a PPI for 0->2 and 0->4 (B matrix) and a significant input to ROI 0 (C matrix).
Step2: Generate a set of synthetic datasets, referring to individual subjects
Step3: Run iMAGES (using a shell script)
Step4: Show the graph estimated by iMAGES
Step5: Show the true graph from the DCM forward model
|
13,999
|
<ASSISTANT_TASK:>
Python Code:
# Load library
import numpy as np
# Create matrix
matrix_a = np.array([[1, 1, 1],
[1, 1, 1],
[1, 1, 2]])
# Create matrix
matrix_b = np.array([[1, 3, 1],
[1, 3, 1],
[1, 3, 8]])
# Add two matrices
np.add(matrix_a, matrix_b)
# Subtract two matrices
np.subtract(matrix_a, matrix_b)
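# Equivalently, the + and - operators perform the same element-wise operations
matrix_a + matrix_b
matrix_a - matrix_b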
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Matrices
Step2: Add Matrices
Step3: Subtract Matrices
|