| text_prompt (string, 168–30.3k chars) | code_prompt (string, 67–124k chars) |
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This is where we are and when the notebook was run
Step2: First let's set the output path for where we want our plots to be saved, set the database path, and see what tables the database contains
Step3: This code adds a button to show/hide the code in the html output
Step4: The code below provides functions for accessing the project database and extracting table names, so you can see what tables have been loaded into the database and are available for plotting. It also has a function for getting a table from the database and indexing the table with the track name
Step5: Here are some functions we need
Step6: Reads per Chromosome
Step7: Contigs that have been filtered should clearly show up with 0 reads across the row
Step8: THIS IS WHERE YOU CAN WRITE YOUR OWN SUMMARY
Step9: WRITE YOUR COMMENTS HERE
|
<ASSISTANT_TASK:>
Python Code:
import sqlite3
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import CGATPipelines.Pipeline as P
import os
import statistics
import collections
#load R and the R packages required
%load_ext rpy2.ipython
%R require(ggplot2)
# use these functions to display tables nicely as html
from IPython.display import display, HTML
plt.style.use('bmh')
#look at other available styles for plotting
#plt.style.available
!pwd
!date
database_path ='../csvdb'
output_path = '.'
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
def getTableNamesFromDB(database_path):
# Create a SQL connection to our SQLite database
con = sqlite3.connect(database_path)
cur = con.cursor()
# the result of a "cursor.execute" can be iterated over by row
cur.execute("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;")
available_tables = (cur.fetchall())
#Be sure to close the connection.
con.close()
return available_tables
db_tables = getTableNamesFromDB(database_path)
print('Tables contained by the database:')
for x in db_tables:
print('\t\t%s' % x[0])
#This function retrieves a table from sql database and indexes it with track name
def getTableFromDB(statement,database_path):
'''gets a table from the sql database depending on the statement
and sets track as the index if 'track' is in the column names'''
conn = sqlite3.connect(database_path)
df = pd.read_sql_query(statement,conn)
if 'track' in df.columns:
df.index = df['track']
return df
###These are functions used to manipulate the table so the order of chromosomes is consistent with their numbers
def StrIsInt(string):
'''function that takes string and tests if it can be represented as an int
e.g. returns true for "3", but False for "Chr3" '''
try:
int(string)
return True
except ValueError:
return False
def orderListOfChr(unordered_chr_list):
'''take a list of chromosomes and return them in
order of chromosome number not string order
e.g. input  = ["chr1", "chr11", "chr2", "chrM"]
output = ["chr1", "chr2", "chr11", "chrM"]'''
#make an empty list the same length as the chromosome list
chr_id = [None]* len(unordered_chr_list)
for value in unordered_chr_list:
x = value.split("chr")[-1]
# check if chr name is int or str
if StrIsInt(x):
chr_id[int(x)-1] = value
else:
chr_id.append(value)
#remove none values from list
ordered_chr_list = [x for x in chr_id if x is not None]
return ordered_chr_list
def reorderDFbyChrOrder(df):
'''Takes a dataframe indexed on chr name
and returns dataframe so that index is sorted based on the
chromosome number
e.g. a dataframe with index chr1,chr11,chr12,chr2,chrM
will be returned with rows in the order "chr1, chr2, chr11, chr12, chrM" '''
list_of_reordered_chr = orderListOfChr(df.index)
return df.reindex(list_of_reordered_chr)
# this subsets the dataframe so it only includes columns containing 'chr'
def getChrNames(df):
'''takes a dataframe with chromosome names in its columns and returns a list of the chromosomes present'''
to_keep = []
for item in df.columns:
if 'chr' in item:
to_keep.append(item)
return to_keep
idxstats_df = getTableFromDB('select * from idxstats_reads_per_chromosome;',database_path)
idxstats_df.index = idxstats_df['region']
reads_per_chr_df = reorderDFbyChrOrder(idxstats_df.drop('region', axis=1))
print ('this table shows million reads per chromosome')
reads_per_chr_df.divide(1000000)
def makeReadsPerChrPlot(df,path):
'''takes the reads-per-chromosome table from the database and plots how many
million reads map to each chromosome, with all samples on a single axis'''
to_keep = []
for item in df.columns:
if 'chr' in item:
to_keep.append(item)
df = df[to_keep]
df = df.divide(1000000)
#where plot will be sent to
file_path = "/".join([path,'mapped_reads_per_chromosome_plot.pdf'])
print ('figure_saved_to %s' % file_path)
ax = df.T.plot(figsize=(11,5),
xticks = range(len(to_keep)),
title = 'Million reads mapped to each chromosome',
ylim=(0,10))
#set labels for plots
ax.set_xlabel("Contig")
ax.set_ylabel("million reads")
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
matplotlib.pyplot.savefig(file_path, bbox_inches='tight')
matplotlib.pyplot.show()
makeReadsPerChrPlot(reads_per_chr_df.T,output_path)
def makePercentReadsPerChrPlot(df,path):
'''takes the idxstats_reads_per_chromosome table from database and calculates percentage
of reads mapping to each chromosome and plots this for each chromosome and returns
percentage table'''
c = df.copy()
for item in c.columns:
if 'chr' not in item and item != 'total_reads':
c = c.drop(item, axis=1)
y = c.div(c.total_reads, axis ='index')*100
y = y.drop('total_reads', axis=1)
file_path = "/".join([path,'percentage_mapped_reads_per_chromosome_plot.pdf'])
print ('figure_saved_to %s' % file_path)
ax = y.T.plot(figsize=(10,5),
xticks = range(len(y.columns)),
title = 'Percentage of total input reads that map to each contig',
ylim=(0,100))
ax.set_xlabel("Contig")
ax.set_ylabel("percentage_reads")
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
matplotlib.pyplot.savefig(file_path, bbox_inches='tight')
matplotlib.pyplot.show()
return y
percent_idxdf = makePercentReadsPerChrPlot(reads_per_chr_df.T,output_path)
percent_idxdf.T
len(reads_per_chr_df.columns)
def makeReadsPerSampleChrPlot(df,path,subplot_dims):
'''takes table from database of chromosome lengths and makes individual plot for
each sample of how many reads map to each chromosome
subplot dims = tuples of the format (num_rows,num_cols)'''
to_keep = []
for item in df.columns:
if 'chr' in item:
to_keep.append(item)
df = df[to_keep]
df = df.divide(1000000)
#where plot will be sent to
file_path = "/".join([path,'mapped_reads_per_chromosome_per_sample_plot.pdf'])
print ('figure_saved_to %s' % file_path)
#plot as subplots-
# can change layout to be better layout=(num_rows,num_cols)
# returns a list of axis of the subplots - select the right axis to add labels
ax = df.T.plot(subplots=True,
figsize=(10,10),
layout = subplot_dims,
xticks = range(len(to_keep)),
title = 'Million reads mapped to each chromosome per sample',
ylim=(0,10))
#set labels for plots
bottom_plot = ax[-1][0]
bottom_plot.set_xlabel("Contig")
middle_plot = (int(subplot_dims[0]/2), int(subplot_dims[1]/2))
a = ax[middle_plot]
a.set_ylabel("million reads")
matplotlib.pyplot.savefig(file_path, bbox_inches='tight')
matplotlib.pyplot.show()
makeReadsPerSampleChrPlot(reads_per_chr_df.T,output_path,(len(reads_per_chr_df.T.columns),1))
def makePercentReadsPerSampleChrPlot(df,path,subplot_dims):
'''takes the idxstats_reads_per_chromosome table from database and calculates percentage
of reads mapping to each chromosome and plots this for each chromosome and returns
percentage table'''
c = df.copy()
for item in c.columns:
if 'chr' not in item and item != 'total_reads':
c = c.drop(item, axis=1)
y = c.div(c['total_reads'], axis ='index')*100
y = y.drop('total_reads', axis=1)
file_path = "/".join([path,'percentage_mapped_reads_per_chromosome_per_sample_plot.pdf'])
print ('figure_saved_to %s' % file_path)
ax = y.T.plot(subplots=True,
layout = subplot_dims,
figsize=(10,10),
xticks = range(len(y.columns)),
title = 'Percentage of total input reads that map to each contig',
ylim=(0,100))
ax[-1][0].set_xlabel("Contig")
middle_plot = (int(subplot_dims[0]/2), int(subplot_dims[1]/2))
ax[middle_plot].set_ylabel("percentage_reads")
matplotlib.pyplot.savefig(file_path, bbox_inches='tight')
matplotlib.pyplot.show()
makePercentReadsPerSampleChrPlot(reads_per_chr_df.T,output_path,(len(reads_per_chr_df.columns),1))
x_vs_y_df = idxstats_df.drop('region', axis=1).T[['chrX','chrY']].copy()
print (x_vs_y_df.head())
x_vs_y_df['total_xy'] = x_vs_y_df.chrX + x_vs_y_df.chrY
x_vs_y_df['percentX'] = x_vs_y_df.chrX/x_vs_y_df.total_xy * 100
x_vs_y_df['percentY'] = x_vs_y_df.chrY/x_vs_y_df.total_xy * 100
display(x_vs_y_df)
#plot bar graph of number of thousand reads mapping to chrX vs chrY
ax = x_vs_y_df[['chrX','chrY']].divide(1000).plot.bar()
ax.set_ylabel('Thousand Reads (not pairs)')
ax.legend(['chrX','chrY'], loc=2,bbox_to_anchor=(1.05, 1),borderaxespad=0. )
ax.set_title('number of reads (not pairs) \n mapping to chrX or chrY')
# plot graph of percentage of reads mapping to either chr X or Y
ax = x_vs_y_df[['percentX', 'percentY']].plot.bar(stacked=True)
ax.legend(['chrX','chrY'], loc=2,bbox_to_anchor=(1.05, 1),borderaxespad=0. )
ax.set_ylabel('percentage reads')
ax.set_title('percentage of sex chromosome reads mapping \n to chrX or chrY')
ax.set_ylim((0,110))
def add_expt_to_df(dataframe):
''' splits a track name such as HsTh1-RATotal-R1.star into its design
features (expt, sample_treatment and replicate) and adds these as
columns to the dataframe'''
expt = []
treatment = []
replicate = []
for value in dataframe.track:
#remove the mapper suffix (e.g. ".star") from the track name
#print value
x = value.split(".")
# split into design features
y = x[0].split('-')
expt.append(y[0])
treatment.append(y[1])
replicate.append(y[2])
if len(expt) == len(treatment) and len(expt) == len(replicate):
print ('all values loaded into the lists correctly')
else:
print ('error in loading values into lists')
#add columns to the dataframe
dataframe['expt_name'] = expt
dataframe['sample_treatment'] = treatment
dataframe['replicate'] = replicate
return dataframe
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code::
import cv2
import numpy as np
%matplotlib inline
from matplotlib import pyplot as plt
# load the image as grayscale and make a binary mask via inverse thresholding
img = cv2.imread("HappyFish.jpg", cv2.IMREAD_GRAYSCALE)
_, mask = cv2.threshold(img, 220, 255, cv2.THRESH_BINARY_INV)
# 5x5 structuring element used by all the morphological operations
kernel = np.ones((5,5), np.uint8)
dilation = cv2.dilate(mask, kernel, iterations=3)
erosion = cv2.erode(mask, kernel, iterations=1)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
MORPH_GRADIENT = cv2.morphologyEx(mask, cv2.MORPH_GRADIENT, kernel)
top_hat = cv2.morphologyEx(mask, cv2.MORPH_TOPHAT, kernel)
titles = ['images',"mask","dilation","erosion","opening",
"closing","MORPH_GRADIENT","top_hat"]
images = [img,mask,dilation,erosion,opening,
closing,MORPH_GRADIENT,top_hat]
for i in range(len(titles)):
plt.subplot(2,4,i+1)
plt.imshow(images[i],"gray")
plt.title(titles[i])
plt.xticks([])
plt.yticks([])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In order to evaluate the different features we will use a simple k-means clustering with the only assumption of knowing the true number of classes existing in the dataset.
Step2: Using the cluster indicators for each data sample we then use the normalised mutual information score to evaluate the similarity between the predicted labels and the ground truth labels.
Step3: First we will perform k-means clustering on the raw feature space.
Step4: Now use a single layer DSNMF model -- i.e. Semi-NMF
Step5: Not bad! That's a huge improvement over using k-means
Step6: Train the model
Step7: Evaluate it in terms of clustering performance using the NMI score
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import sklearn.metrics  # needed for normalized_mutual_info_score below
from sklearn.cluster import KMeans
from dsnmf import DSNMF, appr_seminmf
from scipy.io import loadmat
mat = loadmat('PIE_pose27.mat', struct_as_record=False, squeeze_me=True)
data, gnd = mat['fea'].astype('float32'), mat['gnd']
# Normalise each feature to have an l2-norm equal to one.
data /= np.linalg.norm(data, 2, 1)[:, None]
n_classes = np.unique(gnd).shape[0]
kmeans = KMeans(n_classes, precompute_distances=False)
def evaluate_nmi(X):
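# cluster the features with the shared k-means instance and score the
# predicted labels against the ground truth labels with NMI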
pred = kmeans.fit_predict(X)
score = sklearn.metrics.normalized_mutual_info_score(gnd, pred)
return score
print("K-means on the raw pixels has an NMI of {:.2f}%".format(100 * evaluate_nmi(data)))
from sklearn.decomposition import PCA
fea = PCA(100).fit_transform(data)
score = evaluate_nmi(fea)
print("K-means clustering using the top 100 eigenvectors has an NMI of {:.2f}%".format(100 * score))
Z, H = appr_seminmf(data.T, 100) # seminmf expects a num_features x num_samples matrix
print("K-means clustering using the Semi-NMF features has an NMI of {:.2f}%".format(100 * evaluate_nmi(H.T)))
dsnmf = DSNMF(data, layers=(400, 100))
for epoch in range(1000):
residual = float(dsnmf.train_fun())
print("Epoch {}. Residual [{:.2f}]".format(epoch, residual), end="\r")
fea = dsnmf.get_features().T # this is the last layers features i.e. h_2
pred = kmeans.fit_predict(fea)
score = sklearn.metrics.normalized_mutual_info_score(gnd, pred)
print("NMI: {:.2f}%".format(100 * score))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Let's start with a simple model and config file. You can edit the CONFIG string within the file, or copy it out to a separate file and use Config.from_disk to load it from a path. The [ray] section contains the settings to use for Ray. (We're using a config for convenience, but you don't have to – you can also just hard-code the values.)
Step3: Just like in the original Ray tutorial, we're using the MNIST data (via our ml-datasets package) and are setting up two helper functions
Step4: Setting up Ray
Step5: Defining the Parameter Server
Step6: Defining the Worker
Step7: Setting up the model
Step8: We didn't specify all the dimensions in the model, so we need to pass in a batch of data to finish initialization. This lets Thinc infer the missing shapes.
Step9: Training
Step10: On each iteration, we now compute the gradients for each worker. After all gradients are available, ParameterServer.apply_gradients is called to calculate the update. The frequency setting in the evaluation config specifies how often to evaluate – for instance, a frequency of 10 means we're only evaluating every 10th epoch.
Step11: Asynchronous Parameter Server Training
|
<ASSISTANT_TASK:>
Python Code:
# To let ray install its own version in Colab
!pip uninstall -y pyarrow
# You might need to restart the Colab runtime
!pip install --upgrade "thinc>=8.0.0a0" "ml_datasets>=0.2.0a0" ray psutil setproctitle
import thinc
from thinc.api import chain, Relu, Softmax
@thinc.registry.layers("relu_relu_softmax.v1")
def make_relu_relu_softmax(hidden_width: int, dropout: float):
return chain(
Relu(hidden_width, dropout=dropout),
Relu(hidden_width, dropout=dropout),
Softmax(),
)
CONFIG = """
[training]
iterations = 200
batch_size = 128

[evaluation]
batch_size = 256
frequency = 10

[model]
@layers = "relu_relu_softmax.v1"
hidden_width = 128
dropout = 0.2

[optimizer]
@optimizers = "Adam.v1"

[ray]
num_workers = 2
object_store_memory = 3000000000
num_cpus = 2
"""
import ml_datasets
MNIST = ml_datasets.mnist()
def get_data_loader(model, batch_size):
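# shuffle and batch the MNIST train/dev splits with the backend's multibatch helper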
(train_X, train_Y), (dev_X, dev_Y) = MNIST
train_batches = model.ops.multibatch(batch_size, train_X, train_Y, shuffle=True)
dev_batches = model.ops.multibatch(batch_size, dev_X, dev_Y, shuffle=True)
return train_batches, dev_batches
def evaluate(model, batch_size):
dev_X, dev_Y = MNIST[1]
correct = 0
total = 0
for X, Y in model.ops.multibatch(batch_size, dev_X, dev_Y):
Yh = model.predict(X)
correct += (Yh.argmax(axis=1) == Y.argmax(axis=1)).sum()
total += Yh.shape[0]
return correct / total
from collections import defaultdict
def get_model_weights(model):
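# walk the Thinc model graph and collect each node's parameters, keyed by node id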
params = defaultdict(dict)
for node in model.walk():
for name in node.param_names:
if node.has_param(name):
params[node.id][name] = node.get_param(name)
return params
def set_model_weights(model, params):
for node in model.walk():
for name, param in params[node.id].items():
node.set_param(name, param)
def get_model_grads(model):
grads = defaultdict(dict)
for node in model.walk():
for name in node.grad_names:
grads[node.id][name] = node.get_grad(name)
return grads
def set_model_grads(model, grads):
for node in model.walk():
for name, grad in grads[node.id].items():
node.set_grad(name, grad)
import ray
@ray.remote
class ParameterServer:
def __init__(self, model, optimizer):
self.model = model
self.optimizer = optimizer
def apply_gradients(self, *worker_grads):
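# sum the per-worker gradient dicts element-wise, take one optimizer step
# on the server's copy of the model, and return the updated weights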
summed_gradients = defaultdict(dict)
for grads in worker_grads:
for node_id, node_grads in grads.items():
for name, grad in node_grads.items():
if name in summed_gradients[node_id]:
summed_gradients[node_id][name] += grad
else:
summed_gradients[node_id][name] = grad.copy()
set_model_grads(self.model, summed_gradients)
self.model.finish_update(self.optimizer)
return get_model_weights(self.model)
def get_weights(self):
return get_model_weights(self.model)
from thinc.api import fix_random_seed
@ray.remote
class DataWorker:
def __init__(self, model, batch_size=128, seed=0):
self.model = model
fix_random_seed(seed)
self.data_iterator = iter(get_data_loader(model, batch_size)[0])
self.batch_size = batch_size
def compute_gradients(self, weights):
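# load the server's weights into the local model, run one forward/backward
# pass on the next minibatch, and return the resulting gradients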
set_model_weights(self.model, weights)
try:
data, target = next(self.data_iterator)
except StopIteration: # When the epoch ends, start a new epoch.
self.data_iterator = iter(get_data_loader(self.model, self.batch_size)[0])
data, target = next(self.data_iterator)
guesses, backprop = self.model(data, is_train=True)
backprop((guesses - target) / target.shape[0])
return get_model_grads(self.model)
from thinc.api import registry, Config
C = registry.resolve(Config().from_str(CONFIG))
C
optimizer = C["optimizer"]
model = C["model"]
(train_X, train_Y), (dev_X, dev_Y) = MNIST
model.initialize(X=train_X[:5], Y=train_Y[:5])
ray.init(
ignore_reinit_error=True,
object_store_memory=C["ray"]["object_store_memory"],
num_cpus=C["ray"]["num_cpus"],
)
ps = ParameterServer.remote(model, optimizer)
workers = []
for i in range(C["ray"]["num_workers"]):
worker = DataWorker.remote(model, batch_size=C["training"]["batch_size"], seed=i)
workers.append(worker)
current_weights = ps.get_weights.remote()
for i in range(C["training"]["iterations"]):
gradients = [worker.compute_gradients.remote(current_weights) for worker in workers]
current_weights = ps.apply_gradients.remote(*gradients)
if i % C["evaluation"]["frequency"] == 0:
set_model_weights(model, ray.get(current_weights))
accuracy = evaluate(model, C["evaluation"]["batch_size"])
print(f"{i} \taccuracy: {accuracy:.3f}")
print(f"Final \taccuracy: {accuracy:.3f}")
ray.shutdown()
ray.init(
ignore_reinit_error=True,
object_store_memory=C["ray"]["object_store_memory"],
num_cpus=C["ray"]["num_cpus"],
)
ps = ParameterServer.remote(model, optimizer)
workers = []
for i in range(C["ray"]["num_workers"]):
worker = DataWorker.remote(model, batch_size=C["training"]["batch_size"], seed=i)
workers.append(worker)
current_weights = ps.get_weights.remote()
gradients = {}
for worker in workers:
gradients[worker.compute_gradients.remote(current_weights)] = worker
for i in range(C["training"]["iterations"] * C["ray"]["num_workers"]):
ready_gradient_list, _ = ray.wait(list(gradients))
ready_gradient_id = ready_gradient_list[0]
worker = gradients.pop(ready_gradient_id)
current_weights = ps.apply_gradients.remote(*[ready_gradient_id])
gradients[worker.compute_gradients.remote(current_weights)] = worker
if i % C["evaluation"]["frequency"] == 0:
set_model_weights(model, ray.get(current_weights))
accuracy = evaluate(model, C["evaluation"]["batch_size"])
print(f"{i} \taccuracy: {accuracy:.3f}")
print(f"Final \taccuracy: {accuracy:.3f}")
ray.shutdown()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: But working with this set of dictionaries would not be nearly as fast or easy as a Pandas dataframe, so I soon converted it to that and went ahead and summarized all the features right away
Step2: The summary indicated a large standard deviation for many of the features, and also a lot of missing values in the data for various features. This is not ideal from a machine learning perspective, so I will have to train a model robust enough to work despite these problems. Features with almost no non-null values, such as loan_advances and restricted_stock_deferred, were also dropped.
Step3: This result suggested that most features have large outliers (larger than 3 standard deviations). In order to be careful not to remove any useful data, I manually inspected all rows with large outliers to see if any values seemed appropriate for removal
Step4: Looking through these, I found one instance of a valid outlier - Mark A. Frevert. As he was the CEO of Enron, his salary and bonus were both rightfully much higher than most of the other entries, and yet he was still not classified as a POI. Because I suspect POIs will have larger bonuses and salaries than other people, I removed him from the dataset but kept the rest (such as LAY KENNETH L, who also has a very large salary and bonus but is a POI).
Step5: The results indicated that 'exercised_stock_options', 'total_stock_value', and 'bonus' are the most promising features. Just for fun, I went ahead and plotted these features to see if I could visually verify their significance
Step6: As well as one that is not strongly correlated, to check that the correlations are meaningful
Step7: The data and plots above indicated that exercised_stock_options, total_stock_value, and restricted_stock, and to a lesser extent the payment-related information (total_payments, salary, bonus, and expenses), are all correlated with poi. Therefore, I created new features as sums and ratios of these ones. Working with Pandas made this incredibly easy due to vectorized operations.
Step8: It appeared that several of my features are among the most useful, as 'poi_email_ratio_to', 'stock_sum', and 'money_total' are all ranked highly. But since the dataset is so small, I had no need to get rid of any of the features and went ahead with testing several classifiers with several sets of features.
Step9: Then, I could go right back to Pandas to plot the results. Sure, I could do this with matplotlib just as well, but the flexibility and simplicity of the 'plot' function call on a DataFrame makes it much less annoying to use in my opinion.
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import matplotlib
import pickle
import pandas as pd
import numpy as np
from IPython.display import display
%matplotlib notebook
enron_data = pickle.load(open("./ud120-projects/final_project/final_project_dataset.pkl", "rb"))
print("Number of people: %d"%len(enron_data.keys()))
print("Number of features per person: %d"%len(list(enron_data.values())[0]))
print("Number of POI: %d"%sum([1 if x['poi'] else 0 for x in enron_data.values()]))
df = pd.DataFrame.from_dict(enron_data)
del df['TOTAL']
df = df.transpose()
numeric_df = df.apply(pd.to_numeric, errors='coerce')
del numeric_df['email_address']
numeric_df.describe()
del numeric_df['loan_advances']
del numeric_df['restricted_stock_deferred']
del numeric_df['director_fees']
std = numeric_df.apply(lambda x: np.abs(x - x.mean()) / x.std())
std = std.fillna(std.mean())
std.describe()
outliers = std.apply(lambda x: x > 5).any(axis=1)
outlier_df = pd.DataFrame(index=numeric_df[outliers].index)
for col in numeric_df.columns:
outlier_df[str((col,col+'_std'))] = list(zip(numeric_df[outliers][col],std[outliers][col]))
display(outlier_df)
numeric_df.drop('FREVERT MARK A',inplace=True)
df.drop('FREVERT MARK A',inplace=True)
corr = numeric_df.corr()
print('\nCorrelations between features to POI:\n ' +str(corr['poi']))
numeric_df.hist(column='exercised_stock_options',by='poi',bins=25,sharex=True,sharey=True)
plt.suptitle("exercised_stock_options by POI")
numeric_df.hist(column='total_stock_value',by='poi',bins=25,sharex=True,sharey=True)
plt.suptitle("total_stock_value by POI")
numeric_df.hist(column='bonus',by='poi',bins=25,sharex=True,sharey=True)
plt.suptitle("bonus by POI")
numeric_df.hist(column='to_messages',by='poi',bins=25,sharex=True,sharey=True)
plt.suptitle("to_messages by POI")
#Get rid of label
del numeric_df['poi']
poi = df['poi']
#Create new features
numeric_df['stock_sum'] = numeric_df['exercised_stock_options'] +\
numeric_df['total_stock_value'] +\
numeric_df['restricted_stock']
numeric_df['stock_ratio'] = numeric_df['exercised_stock_options']/numeric_df['total_stock_value']
numeric_df['money_total'] = numeric_df['salary'] +\
numeric_df['bonus'] -\
numeric_df['expenses']
numeric_df['money_ratio'] = numeric_df['bonus']/numeric_df['salary']
numeric_df['email_ratio'] = numeric_df['from_messages']/(numeric_df['to_messages']+numeric_df['from_messages'])
numeric_df['poi_email_ratio_from'] = numeric_df['from_poi_to_this_person']/numeric_df['to_messages']
numeric_df['poi_email_ratio_to'] = numeric_df['from_this_person_to_poi']/numeric_df['from_messages']
#Fill NA values with the column means
numeric_df = numeric_df.fillna(numeric_df.mean())
#Scale to 1-0
numeric_df = (numeric_df-numeric_df.min())/(numeric_df.max()-numeric_df.min())
from sklearn.feature_selection import SelectKBest
selector = SelectKBest()
selector.fit(numeric_df,poi.tolist())
scores = {numeric_df.columns[i]:selector.scores_[i] for i in range(len(numeric_df.columns))}
sorted_features = sorted(scores,key=scores.get, reverse=True)
for feature in sorted_features:
print('Feature %s has value %f'%(feature,scores[feature]))
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.grid_search import RandomizedSearchCV, GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import precision_score, recall_score, accuracy_score
from sklearn.cross_validation import StratifiedShuffleSplit
import scipy
import warnings
warnings.filterwarnings('ignore')
gnb_clf = GridSearchCV(GaussianNB(),{})
#No params to tune for for linear bayes, use for convenience
svc_clf = SVC()
svc_search_params = {'C': scipy.stats.expon(scale=1),
'gamma': scipy.stats.expon(scale=.1),
'kernel': ['linear','poly','rbf'],
'class_weight':['balanced',None]}
svc_search = RandomizedSearchCV(svc_clf,
param_distributions=svc_search_params,
n_iter=25)
tree_clf = DecisionTreeClassifier()
tree_search_params = {'criterion':['gini','entropy'],
'max_leaf_nodes':[None,25,50,100,1000],
'min_samples_split':[2,3,4],
'max_features':[0.25,0.5,0.75,1.0]}
tree_search = GridSearchCV(tree_clf,
tree_search_params,
scoring='recall')
search_methods = [gnb_clf,svc_search,tree_search]
average_accuracies = [[0],[0],[0]]
average_precision = [[0],[0],[0]]
average_recall = [[0],[0],[0]]
num_splits = 10
train_split = 0.9
indices = list(StratifiedShuffleSplit(poi.tolist(),
num_splits,
test_size=1-train_split,
random_state=0))
best_features = None
max_score = 0
best_classifier = None
num_features = 0
for num_features in range(1,len(sorted_features)+1):
features = sorted_features[:num_features]
feature_df = numeric_df[features]
for classifier_idx in range(3):
sum_values = [0,0,0]
#Only do parameter search once, too wasteful to do a ton
search_methods[classifier_idx].fit(feature_df.iloc[indices[0][0],:],
poi[indices[0][0]].tolist())
classifier = search_methods[classifier_idx].best_estimator_
for split_idx in range(num_splits):
train_indices, test_indices = indices[split_idx]
train_data = (feature_df.iloc[train_indices,:],poi[train_indices].tolist())
test_data = (feature_df.iloc[test_indices,:],poi[test_indices].tolist())
classifier.fit(train_data[0],train_data[1])
predicted = classifier.predict(test_data[0])
# sklearn's metrics expect (y_true, y_pred)
sum_values[0] += accuracy_score(test_data[1], predicted)
sum_values[1] += precision_score(test_data[1], predicted)
sum_values[2] += recall_score(test_data[1], predicted)
avg_acc,avg_prs,avg_recall = [val/num_splits for val in sum_values]
average_accuracies[classifier_idx].append(avg_acc)
average_precision[classifier_idx].append(avg_prs)
average_recall[classifier_idx].append(avg_recall)
score = (avg_prs+avg_recall)/2
if score>max_score and avg_prs>0.3 and avg_recall>0.3:
max_score = score
best_features = features
best_classifier = search_methods[classifier_idx].best_estimator_
print('Best classifier found is %s \n\
with score (recall+precision)/2 of %f\n\
and feature set %s'%(str(best_classifier),max_score,best_features))
results = pd.DataFrame.from_dict({'Naive Bayes': average_accuracies[0],
'SVC':average_accuracies[1],
'Decision Tree':average_accuracies[2]})
results.plot(xlim=(1,len(sorted_features)-1),ylim=(0,1))
plt.suptitle("Classifier accuracy by # of features")
results = pd.DataFrame.from_dict({'Naive Bayes': average_precision[0],
'SVC':average_precision[1],
'Decision Tree':average_precision[2]})
results.plot(xlim=(1,len(sorted_features)-1),ylim=(0,1))
plt.suptitle("Classifier precision by # of features")
results = pd.DataFrame.from_dict({'Naive Bayes': average_recall[0],
'SVC':average_recall[1],
'Decision Tree':average_recall[2]})
results.plot(xlim=(1,len(sorted_features)-1),ylim=(0,1))
plt.suptitle("Classifier recall by # of features")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Standalone Model Card Toolkit Demo
Step2: Did you restart the runtime?
Step3: Model
Step4: Dataset
Step5: Use the Model Card Toolkit
Step6: Annotate the Model Card
Step7: Quantitative Analysis
Step8: Considerations
Step9: Graph Fields
Step10: Now we can add them to our ModelCard.
Step11: Generate the Model Card
Step12: Now, the ModelCardToolkit can generate a Model Card document with ModelCardToolkit.export_format().
Step13: You can also output a Model Card in other formats, like Markdown.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install --upgrade pip==21.3
!pip install 'model-card-toolkit>=1.0.0,<1.1'
!pip install 'tensorflow>=2.3.1'
import tensorflow as tf
import numpy as np
import model_card_toolkit as mctlib
from model_card_toolkit.documentation.examples import cats_vs_dogs
from model_card_toolkit.utils.graphics import figure_to_base64str
import tempfile
import matplotlib.pyplot as plt
from IPython import display
import requests
import os
import zipfile
URL = 'https://storage.googleapis.com/cats_vs_dogs_model/cats_vs_dogs_model.zip'
BASE_PATH = tempfile.mkdtemp()
ZIP_PATH = os.path.join(BASE_PATH, 'cats_vs_dogs_model.zip')
MODEL_PATH = os.path.join(BASE_PATH,'cats_vs_dogs_model')
r = requests.get(URL, allow_redirects=True)
open(ZIP_PATH, 'wb').write(r.content)
with zipfile.ZipFile(ZIP_PATH, 'r') as zip_ref:
zip_ref.extractall(BASE_PATH)
model = tf.keras.models.load_model(MODEL_PATH)
def compute_accuracy(data):
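# stack the examples into one array and return the model's accuracy metric on it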
x = np.stack(data['examples'])
y = np.asarray(data['labels'])
_, metric = model.evaluate(x, y)
return metric
examples = cats_vs_dogs.get_data()
print('num validation examples:', len(examples['combined']['examples']))
print('num cat examples:', len(examples['cat']['examples']))
print('num dog examples:', len(examples['dog']['examples']))
accuracy = compute_accuracy(examples['combined'])
cat_accuracy = compute_accuracy(examples['cat'])
dog_accuracy = compute_accuracy(examples['dog'])
# https://github.com/tensorflow/model-card-toolkit/blob/master/model_card_toolkit/model_card_toolkit.py
model_card_dir = tempfile.mkdtemp()
mct = mctlib.ModelCardToolkit(model_card_dir)
# https://github.com/tensorflow/model-card-toolkit/blob/master/model_card_toolkit/model_card.py
model_card = mct.scaffold_assets()
model_card.model_details.name = 'Fine-tuned MobileNetV2 Model for Cats vs. Dogs'
model_card.model_details.overview = (
'This model distinguishes cat and dog images. It uses the MobileNetV2 '
'architecture (https://arxiv.org/abs/1801.04381) and is trained on the '
'Cats vs Dogs dataset '
'(https://www.tensorflow.org/datasets/catalog/cats_vs_dogs). This model '
'performed with high accuracy on both Cat and Dog images.'
)
model_card.model_details.owners = [
mctlib.Owner(name='Model Cards Team', contact='model-cards@google.com')
]
model_card.model_details.version = mctlib.Version(name='v1.0', date='08/28/2020')
model_card.model_details.references = [
mctlib.Reference(reference='https://www.tensorflow.org/guide/keras/transfer_learning'),
mctlib.Reference(reference='https://arxiv.org/abs/1801.04381'),
]
model_card.model_details.licenses = [mctlib.License(identifier='Apache-2.0')]
model_card.model_details.citations = [mctlib.Citation(citation='https://github.com/tensorflow/model-card-toolkit/blob/master/model_card_toolkit/documentation/examples/Standalone_Model_Card_Toolkit_Demo.ipynb')]
model_card.quantitative_analysis.performance_metrics = [
mctlib.PerformanceMetric(type='accuracy', value=str(accuracy)),
mctlib.PerformanceMetric(type='accuracy', value=str(cat_accuracy), slice='cat'),
mctlib.PerformanceMetric(type='accuracy', value=str(dog_accuracy), slice='dog'),
]
model_card.considerations.use_cases = [
mctlib.UseCase(description='This model classifies images of cats and dogs.')
]
model_card.considerations.limitations = [
mctlib.Limitation(description='This model is not able to classify images of other classes.')
]
model_card.considerations.ethical_considerations = [mctlib.Risk(
name=
'While distinguishing between cats and dogs is generally agreed to be '
'a benign application of machine learning, harmful results can occur '
'when the model attempts to classify images that don’t contain cats or '
'dogs.',
mitigation_strategy=
'Avoid application on non-dog and non-cat images.'
)]
# Validation Set Size Bar Chart
fig, ax = plt.subplots()
width = 0.75
rects0 = ax.bar(0, len(examples['combined']['examples']), width, label='Overall')
rects1 = ax.bar(1, len(examples['cat']['examples']), width, label='Cat')
rects2 = ax.bar(2, len(examples['dog']['examples']), width, label='Dog')
ax.set_xticks(np.arange(3))
ax.set_xticklabels(['Overall', 'Cat', 'Dog'])
ax.set_ylabel('Validation Set Size')
ax.set_xlabel('Slices')
ax.set_title('Validation Set Size for Slices')
validation_set_size_barchart = figure_to_base64str(fig)
# Accuracy Bar Chart
fig, ax = plt.subplots()
width = 0.75
rects0 = ax.bar(0, accuracy, width, label='Overall')
rects1 = ax.bar(1, cat_accuracy, width, label='Cat')
rects2 = ax.bar(2, dog_accuracy, width, label='Dog')
ax.set_xticks(np.arange(3))
ax.set_xticklabels(['Overall', 'Cat', 'Dog'])
ax.set_ylabel('Accuracy')
ax.set_xlabel('Slices')
ax.set_title('Accuracy on Slices')
accuracy_barchart = figure_to_base64str(fig)
model_card.model_parameters.data.append(mctlib.Dataset())
model_card.model_parameters.data[0].graphics.collection = [
mctlib.Graphic(name='Validation Set Size', image=validation_set_size_barchart),
]
model_card.quantitative_analysis.graphics.collection = [
mctlib.Graphic(name='Accuracy', image=accuracy_barchart),
]
mct.update_model_card(model_card)
# Generate a model card document in HTML (default)
html_doc = mct.export_format()
# Display the model card document in HTML
display.display(display.HTML(html_doc))
# Generate a model card document in Markdown
md_path = os.path.join(model_card_dir, 'template/md/default_template.md.jinja')
md_doc = mct.export_format(template_path=md_path, output_file='model_card.md')
# Display the model card document in Markdown
display.display(display.Markdown(md_doc))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: pre-defined colors
|
<ASSISTANT_TASK:>
Python Code:
df.plot.line('Time' , ['Sig1', 'Sig2', 'Sig3'], color=['c' , 'm' , 'y'])
df.plot.line('Time' , ['Sig1', 'Sig2', 'Sig3'], color=['#800000' , '#008000' , '#000080'])
df.plot.line('Time' , ['Sig1', 'Sig2', 'Sig3'], color=['r' , 'g' , 'b'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Functions
Step2: You can also copy and paste the functions several times with different values for a composition of motions
|
<ASSISTANT_TASK:>
Python Code:
# click on this cell and press Shift+Enter
import packages.initialization
import pioneer3dx as p3dx
p3dx.init()
# Move forward
p3dx.move(2.5,2.5)
p3dx.sleep(1)
p3dx.stop()
# Move backward
p3dx.move(-2.5,-2.5)
p3dx.sleep(1)
p3dx.stop()
# Turn left
p3dx.move(-2.5,2.5)
p3dx.sleep(1)
p3dx.stop()
# Turn Right
p3dx.move(2.5,-2.5)
p3dx.sleep(1)
p3dx.stop()
# Your own wonderful motion sequence
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Stochastic Kinetic Model
Step2: State Space Representation
Step3: In this model, the state space can be visualized as a 2-D lattice of nonnegative integers, where each point $(x_1, x_2)$ denotes the number of smileys versus the zombies.
Step4: Generic code to simulate an SKM
Step5: A simple ecosystem
Step6: A simple ecological network
Step7: Alternative model
Step8: 🙀
Step9: From Diaconis and Freedman
Step10: A random switching system
Step11: Polya Urn Models
Step12: Polya
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pylab as plt
N = 100
T = 100
a = 0.9
xm = 0.9
sP = np.sqrt(0.001)
sR = np.sqrt(0.01)
x1 = np.zeros(N)
x2 = np.zeros(N)
y = np.zeros(N)
for i in range(N):
if i==0:
x1[0] = xm
x2[0] = 0
else:
x1[i] = xm + a*x1[i-1] + np.random.normal(0, sP)
x2[i] = x2[i-1] + x1[i-1]
y[i] = np.cos(2*np.pi*x2[i]/T) + np.random.normal(0, sR)
plt.figure()
plt.plot(x1) # the latent AR(1) state
plt.figure()
plt.plot(y) # the noisy observations
plt.show()
%matplotlib inline
import numpy as np
import matplotlib.pylab as plt
A = np.array([[1,0],[1,1],[0,1]])
B = np.array([[2,0],[0,2],[0,0]])
S = B-A
N = S.shape[1]
M = S.shape[0]
STEPS = 50000
k = np.array([0.8,0.005, 0.3])
X = np.zeros((N,STEPS))
x = np.array([100,100])
T = np.zeros(STEPS)
t = 0
for i in range(STEPS-1):
rho = k*np.array([x[0], x[0]*x[1], x[1]])
srho = np.sum(rho)
if srho == 0:
break
idx = np.random.choice(M, p=rho/srho)
dt = np.random.exponential(scale=1./srho)
x = x + S[idx,:]
t = t + dt
X[:, i+1] = x
T[i+1] = t
plt.figure(figsize=(10,5))
plt.plot(T,X[0,:], '.b')
plt.plot(T,X[1,:], '.r')
plt.legend([u'Smiley',u'Zombie'])
plt.show()
plt.figure(figsize=(10,5))
plt.plot(X[0,:],X[1,:], '.')
plt.xlabel('# of Smileys')
plt.ylabel('# of Zombies')
plt.axis('square')
plt.show()
%matplotlib inline
import networkx as nx
import numpy as np
import matplotlib.pylab as plt
from itertools import product
# Maximum number of smileys or zombies
N = 20
#A = np.array([[1,0],[1,1],[0,1]])
#B = np.array([[2,0],[0,2],[0,0]])
#S = B-A
k = np.array([0.6,0.05, 0.3])
G = nx.DiGraph()
pos = [u for u in product(range(N),range(N))]
idx = [u[0]*N+u[1] for u in pos]
G.add_nodes_from(idx)
edge_colors = []
edges = []
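# for every lattice state (x smileys, y zombies) add an edge per possible
# reaction, shaded by the reaction's relative propensity at that state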
for y,x in product(range(N),range(N)):
source = (x,y)
rho = k*np.array([source[0], source[0]*source[1], source[1]])
srho = np.sum(rho)
if srho==0:
srho = 1.
if x<N-1: # Birth
target = (x+1,y)
edges.append((source[0]*N+source[1], target[0]*N+target[1]))
edge_colors.append(rho[0]/srho)
if y<N-1 and x>0: # Consumption
target = (x-1,y+1)
edges.append((source[0]*N+source[1], target[0]*N+target[1]))
edge_colors.append(rho[1]/srho)
if y>0: # Death
target = (x,y-1)
edges.append((source[0]*N+source[1], target[0]*N+target[1]))
edge_colors.append(rho[2]/srho)
G.add_edges_from(edges)
col_dict = {u: c for u,c in zip(edges, edge_colors)}
cols = [col_dict[u] for u in G.edges() ]
plt.figure(figsize=(9,9))
nx.draw(G, pos, arrows=False, width=2, node_size=20, node_color="white", edge_vmin=0,edge_vmax=0.7, edge_color=cols, edge_cmap=plt.cm.gray_r )
plt.xlabel('# of smileys')
plt.ylabel('# of zombies')
#plt.gca().set_visible('on')
plt.show()
def simulate_skm(A, B, k, x0, STEPS=1000):
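'''Simulate a stochastic kinetic model with Gillespie's direct method.
A: reactant stoichiometry (reactions x species), B: product stoichiometry,
k: rate constants, x0: initial species counts. Each step picks a reaction
with probability proportional to its mass-action propensity and samples an
exponential waiting time. Returns the trajectory X and the event times T.'''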
S = B-A
N = S.shape[1]
M = S.shape[0]
X = np.zeros((N,STEPS))
x = x0
T = np.zeros(STEPS)
t = 0
X[:,0] = x
for i in range(STEPS-1):
# rho = k*np.array([x[0]*x[2], x[0], x[0]*x[1], x[1]])
rho = [k[j]*np.prod(x**A[j,:]) for j in range(M)]
srho = np.sum(rho)
if srho == 0:
break
idx = np.random.choice(M, p=rho/srho)
dt = np.random.exponential(scale=1./srho)
x = x + S[idx,:]
t = t + dt
X[:, i+1] = x
T[i+1] = t
return X,T
#%matplotlib nbagg
%matplotlib inline
import numpy as np
import matplotlib.pylab as plt
A = np.array([[1,1],[1,0]])
B = np.array([[2,0],[0,1]])
k = np.array([0.02,0.3])
x0 = np.array([10,40])
X,T = simulate_skm(A,B,k,x0,STEPS=10000)
plt.figure(figsize=(10,5))
plt.plot(T,X[0,:], '.b',ms=2)
plt.plot(T,X[1,:], '.g',ms=2)
plt.legend([u'Rabbit', u'Clover'])
plt.show()
#%matplotlib nbagg
%matplotlib inline
import numpy as np
import matplotlib.pylab as plt
A = np.array([[1,0,1],[1,0,0],[1,1,0],[0,1,0]])
B = np.array([[2,0,0],[0,0,1],[0,2,0],[0,0,1]])
#k = np.array([0.02,0.09, 0.001, 0.3])
#x0 = np.array([1000,1000,10000])
k = np.array([0.02,0.19, 0.001, 2.8])
x0 = np.array([1000,1,10000])
X,T = simulate_skm(A,B,k,x0,STEPS=50000)
plt.figure(figsize=(10,5))
plt.plot(T,X[0,:], '.y',ms=2)
plt.plot(T,X[1,:], '.r',ms=2)
plt.plot(T,X[2,:], '.g',ms=2)
plt.legend([u'Rabbit',u'Wolf',u'Clover'])
plt.show()
sm = int(sum(X[:,0]))+1
Hist = np.zeros((sm,sm))
STEPS = X.shape[1]
for i in range(STEPS):
Hist[int(X[1,i]),int(X[0,i])] = Hist[int(X[1,i]),int(X[0,i])] + 1
plt.figure(figsize=(10,5))
#plt.plot(X[0,:],X[1,:], '.',ms=1)
plt.imshow(Hist,interpolation='nearest')
plt.xlabel('# of Rabbits')
plt.ylabel('# of Wolfs')
plt.gca().invert_yaxis()
#plt.axis('square')
plt.show()
%matplotlib inline
import networkx as nx
import numpy as np
import matplotlib.pylab as plt
# Maximum number of rabbits or wolves
N = 30
k = np.array([0.005,0.06, 0.001, 0.1])
G = nx.DiGraph()
pos = [u for u in product(range(N),range(N))]
idx = [u[0]*N+u[1] for u in pos]
G.add_nodes_from(idx)
edge_colors = []
edges = []
for y,x in product(range(N),range(N)):
clover = N - (x+y)
source = (x,y)
rho = k*np.array([source[0]*clover, source[0], source[0]*source[1], source[1]])
srho = np.sum(rho)
if srho==0:
srho = 1.
if x<N-1: # Rabbit Birth
target = (x+1,y)
edges.append((source[0]*N+source[1], target[0]*N+target[1]))
edge_colors.append(rho[0]/srho)
if y<N-1 and x>0: # Consumption
target = (x-1,y+1)
edges.append((source[0]*N+source[1], target[0]*N+target[1]))
edge_colors.append(rho[2]/srho)
# if y>0: # Wolf Death
# target = (x,y-1)
# edges.append((source[0]*N+source[1], target[0]*N+target[1]))
# edge_colors.append(rho[3]/srho)
# if x>0: # Rabbit Death
# target = (x-1,y)
# edges.append((source[0]*N+source[1], target[0]*N+target[1]))
# edge_colors.append(rho[1]/srho)
G.add_edges_from(edges)
col_dict = {u: c for u,c in zip(edges, edge_colors)}
cols = [col_dict[u] for u in G.edges() ]
plt.figure(figsize=(5,5))
nx.draw(G, pos, arrows=False, width=2, node_size=20, node_color="white", edge_vmin=0,edge_vmax=0.4, edge_color=cols, edge_cmap=plt.cm.gray_r )
plt.xlabel('# of smileys')
plt.ylabel('# of zombies')
#plt.gca().set_visible('on')
plt.show()
#%matplotlib nbagg
%matplotlib inline
import numpy as np
import matplotlib.pylab as plt
A = np.array([[1,0,1],[1,1,0],[0,1,0],[0,1,0]])
B = np.array([[2,0,1],[0,1,0],[0,2,0],[0,0,0]])
k = np.array([4.0,0.038, 0.02, 0.01])
x0 = np.array([50,100,1])
X,T = simulate_skm(A,B,k,x0,STEPS=10000)
plt.figure(figsize=(10,5))
plt.plot(T,X[0,:], '.b',ms=2)
plt.plot(T,X[1,:], '.r',ms=2)
plt.plot(T,X[2,:], '.g',ms=2)
plt.legend([u'Rabbit',u'Wolf',u'Clover'])
plt.show()
#%matplotlib nbagg
%matplotlib inline
import numpy as np
import matplotlib.pylab as plt
death_rate = 1.8
A = np.array([[1,0,0,1],[1,1,0,0],[0,0,1,0],[0,0,1,0],[0,0,1,0],[0,1,0,0]])
B = np.array([[2,0,0,1],[0,0,1,0],[0,1,0,0],[0,2,0,0],[0,0,0,0],[0,0,0,0]])
k = np.array([9.7, 9.5, 30, 3.5, death_rate, death_rate])
x0 = np.array([150,20,10,1])
X,T = simulate_skm(A,B,k,x0,STEPS=5000)
plt.figure(figsize=(10,5))
plt.plot(X[0,:], '.b',ms=2)
plt.plot(X[1,:], 'or',ms=2)
plt.plot(X[2,:], '.r',ms=3)
plt.legend([u'Mouse',u'Hungry Cat',u'Happy Cat'])
plt.show()
#Diaconis and Freedman fern
%matplotlib inline
import numpy as np
import matplotlib.pylab as plt
T = 3000
x = np.matrix(np.zeros((2,T)))
x[:,0] = np.matrix('[0.3533; 0]')
A = [np.matrix('[0.444 -0.3733;0.06 0.6000]'), np.matrix('[-0.8 -0.1867;0.1371 0.8]')]
B = [np.matrix('[0.3533;0]'), np.matrix('[1.1;0.1]')]
w = 0.27
for i in range(T-1):
if np.random.rand()<w:
c = 0
else:
c = 1
x[:,i+1] = A[c]*x[:,i] + B[c]
plt.figure(figsize=(5,5))
plt.plot(x[0,:],x[1,:], 'k.',ms=1)
plt.plot(x[0,0:40].T,x[1,0:40].T, 'k:')
plt.axis('equal')
plt.show()
plt.plot(x[0,0:200].T,x[1,0:200].T, 'k-')
plt.axis('equal')
plt.show()
#%matplotlib nbagg
%matplotlib inline
import numpy as np
import matplotlib.pylab as plt
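# a random switching system: each event converts one unit of one species
# into the other, with equal rates for both directions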
A = np.array([[1,0],[0,1]])
B = np.array([[0,1],[1,0]])
k = np.array([0.5,0.5])
x0 = np.array([0,50])
X,T = simulate_skm(A,B,k,x0,STEPS=10000)
plt.figure(figsize=(10,5))
plt.plot(T,X[0,:], '.b',ms=2)
plt.plot(T,X[1,:], '.g',ms=2)
plt.legend([u'A', u'B'])
plt.show()
plt.hist(X[0,:],range=(0,np.sum(x0)),bins=np.sum(x0))
plt.show()
%matplotlib inline
import numpy as np
import matplotlib.pylab as plt
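# Polya urn as an SKM: drawing a ball of either colour (propensity
# proportional to its count) returns it plus one more of the same colour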
A = np.array([[1,0],[0,1]])
B = np.array([[2,0],[0,2]])
k = np.array([0.05,0.05])
x0 = np.array([3,1])
X,T = simulate_skm(A,B,k,x0,STEPS=2000)
plt.figure(figsize=(10,5))
plt.plot(T,X[0,:]/(X[0,:]+X[1,:]), '.-',ms=2)
plt.ylim([0,1])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The function _make_string is a helper function that is used to simplify the implementation of the method __str__.
Step2: Graphical Representation
Step3: The method $t.\texttt{toDot}()$ takes a heap $t$ and returns a graph that depicts the tree $t$.
Step4: The method $t.\texttt{collectIDs}(d)$ takes a heap $t$ and a dictionary $d$ and updates the dictionary so that the following holds: $d[n.\texttt{getID}()] = n$ for every node $n$ occurring in $t$.
Step5: Defining $\texttt{Nil}$ and $\texttt{Node}(p, v, l, r)$ as Classes
Step6: The class Node represents a heap of the form $\texttt{Node}(p,v,l,r)$ where
Step7: Implementing the Method top
Step8: For the class Node, the function top is specified via the following equation
Step9: Implementing the method insert
Step10: $p_{\mathrm{top}} \leq p \;\wedge\; l.\texttt{count}() \leq r.\texttt{count}() \;\rightarrow\; \texttt{Node}(p_{\mathrm{top}}, v_{\mathrm{top}}, l, r).\texttt{insert}(p, v) = \texttt{Node}(p_{\mathrm{top}}, v_{\mathrm{top}}, l.\texttt{insert}(p, v), r)$
Step11: Implementing the Method remove
Step12: $\texttt{Node}(p,v,\texttt{Nil},r).\texttt{remove}() = r$
Step13: Testing
Step14: Heapsort
|
<ASSISTANT_TASK:>
Python Code:
class Heap:
sNodeCount = 0
def __init__(self):
Heap.sNodeCount += 1
self.mID = str(Heap.sNodeCount)
def getID(self):
return self.mID # used only by graphviz
def _make_string(self, attributes):
# get the name of the class of the object self
name = self.__class__.__name__
# map the function __str__ to all attributes and join them with a comma
return f"{name}({', '.join(map(str, [getattr(self, at) for at in attributes]))})"
Heap._make_string = _make_string
import graphviz as gv
def toDot(self):
dot = gv.Digraph(node_attr={'shape': 'record', 'style': 'rounded'})
nodeDict = {}
self._collectIDs(nodeDict)
for n, t in nodeDict.items():
if isinstance(t, Nil):
dot.node(n, label='', shape='point')
elif isinstance(t, Node):
if t.mValue != None:
dot.node(n, label='{' + str(t.mPriority) + '|' + str(t.mValue) + '}')
else:
dot.node(n, label= str(t.mPriority))
else:
assert False, f'Unknown node {t}'
for n, t in nodeDict.items():
if isinstance(t, Node):
dot.edge(n, t.mLeft .getID())
dot.edge(n, t.mRight.getID())
return dot
Heap.toDot = toDot
def _collectIDs(self, nodeDict):
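# record this node under its id and recurse into both children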
nodeDict[self.getID()] = self
if isinstance(self, Node):
self.mLeft ._collectIDs(nodeDict)
self.mRight._collectIDs(nodeDict)
Heap._collectIDs = _collectIDs
class Nil(Heap):
def _count(self):
return 0
def __str__(self):
return 'Nil()'
class Node(Heap):
def __init__(self, priority, value, left, right):
Heap.__init__(self)
self.mPriority = priority
self.mValue = value
self.mLeft = left
self.mRight = right
self.mCount = left._count() + right._count() + 1
def _extract(self):
return self.mPriority, self.mValue, self.mLeft, self.mRight
def _count(self):
return self.mCount
def __str__(self):
return self._make_string(['mPriority', 'mValue', 'mLeft', 'mRight'])
def top(self):
return None
Nil.top = top
del top
def top(self):
return self.mPriority, self.mValue
Node.top = top
del top
def insert(self, p, v):
return Node(p, v, Nil(), Nil())
Nil.insert = insert
del insert
def insert(self, p, v):
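# keep the smaller priority at the root; push the other pair into the
# subtree with fewer nodes so the tree stays balanced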
p_top, v_top, l, r = self._extract()
if p_top <= p:
if l._count() <= r._count():
return Node(p_top, v_top, l.insert(p, v), r)
else:
return Node(p_top, v_top, l, r.insert(p, v))
else:
if l._count() <= r._count():
return Node(p, v, l.insert(p_top, v_top), r)
else:
return Node(p, v, l, r.insert(p_top, v_top))
Node.insert = insert
del insert
def remove(self):
return self
Nil.remove = remove
del remove
def remove(self):
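# remove the root: promote the child whose root has the smaller priority
# and remove recursively from that child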
p, v, l, r = self._extract()
if isinstance(l, Nil):
return r
if isinstance(r, Nil):
return l
p1, v1, l1, r1 = l._extract()
p2, v2, l2, r2 = r._extract()
if p1 <= p2:
return Node(p1, v1, l.remove(), r)
else:
return Node(p2, v2, l, r.remove())
Node.remove = remove
del remove
h = Nil()
h.toDot()
h = h.insert(2, 'a')
h.toDot()
h = h.insert(1, 'b')
h.toDot()
h = h.insert(7, 'c')
h.toDot()
h = h.insert(0, 'd')
h.toDot()
h = h.insert(8, 'e')
h.toDot()
h = h.insert(3, 'f')
h.toDot()
h = h.insert(4, 'g')
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = Nil()
h.toDot()
for i in range(1, 63+1):
h = h.insert(i, None)
h.toDot()
def heap_sort(L):
H = Nil()
for p in L:
H = H.insert(p, None)
S = []
display(H.toDot())
while isinstance(H, Node):
p, _ = H.top()
S.append(p)
H = H.remove()
return S
heap_sort([77, 54, 68, 7, 13, 1, 4, 5, 6, 3, 12, 67, 12, 14, 23, 54, 67])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Add TensorFlow Slim Model Zoo to path
Step2: The Inception v1 (GoogLeNet) Architecture
Step3: Build the model and select layers we need - the features are taken from the final network layer, before the softmax nonlinearity.
Step4: Display the network layout graph on TensorBoard
Step5: Load an Example Image
Step6: Run using the Example Image
Step7: Use the Network to create 'features' for the training images
Step8: Build an SVM model over the features
Step9: Use the SVM model to classify the test set
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import numpy as np
import scipy
import matplotlib.pyplot as plt
%matplotlib inline
import time
from urllib.request import urlopen # Python 3+ version (instead of urllib2)
CLASS_DIR='./images/cars'
#CLASS_DIR='./images/seefood' # for HotDog vs NotHotDog
import os, sys
slim_models_dir = './models/tensorflow_zoo'
if not os.path.exists(slim_models_dir):
print("Creating model/tensorflow_zoo directory")
os.makedirs(slim_models_dir)
if not os.path.isfile( os.path.join(slim_models_dir, 'models', 'README.md') ):
print("Cloning tensorflow model zoo under %s" % (slim_models_dir, ))
!cd {slim_models_dir}; git clone https://github.com/tensorflow/models.git
sys.path.append(slim_models_dir + "/models/slim")
print("Model Zoo model code installed")
from datasets import dataset_utils
targz = "inception_v1_2016_08_28.tar.gz"
url = "http://download.tensorflow.org/models/"+targz
checkpoints_dir = './data/tensorflow_zoo/checkpoints'
if not os.path.exists(checkpoints_dir):
os.makedirs(checkpoints_dir)
if not os.path.isfile( os.path.join(checkpoints_dir, 'inception_v1.ckpt') ):
tarfilepath = os.path.join(checkpoints_dir, targz)
if os.path.isfile(tarfilepath):
import tarfile
tarfile.open(tarfilepath, 'r:gz').extractall(checkpoints_dir)
else:
dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)
# Get rid of tarfile source (the checkpoint itself will remain)
os.unlink(tarfilepath)
print("Checkpoint available locally")
slim = tf.contrib.slim
from nets import inception
from preprocessing import inception_preprocessing
image_size = inception.inception_v1.default_image_size
image_size
imagenet_labels_file = './data/imagenet_synset_words.txt'
if os.path.isfile(imagenet_labels_file):
print("Loading ImageNet synset data locally")
with open(imagenet_labels_file, 'r') as f:
imagenet_labels = {0: 'background'}
for i, line in enumerate(f.readlines()):
# n01440764 tench, Tinca tinca
synset,human = line.strip().split(' ', 1)
imagenet_labels[ i+1 ] = human
else:
print("Downloading ImageNet synset data from repo")
from datasets import imagenet
imagenet_labels = imagenet.create_readable_names_for_imagenet_labels()
print("ImageNet synset labels available")
tf.reset_default_graph()
# This creates an image 'placeholder'
# input_image = tf.image.decode_jpeg(image_string, channels=3)
input_image = tf.placeholder(tf.uint8, shape=[None, None, 3], name='input_image')
# Define the pre-processing chain within the graph - based on the input 'image' above
processed_image = inception_preprocessing.preprocess_image(input_image, image_size, image_size, is_training=False)
processed_images = tf.expand_dims(processed_image, 0)
# Reverse out some of the transforms, so we can see the area/scaling of the inception input
numpyish_image = tf.multiply(processed_image, 0.5)
numpyish_image = tf.add(numpyish_image, 0.5)
numpyish_image = tf.multiply(numpyish_image, 255.0)
# Create the model - which uses the above pre-processing on image
# it also uses the default arg scope to configure the batch norm parameters.
print("Model builder starting")
# Here is the actual model zoo model being instantiated :
with slim.arg_scope(inception.inception_v1_arg_scope()):
logits, _ = inception.inception_v1(processed_images, num_classes=1001, is_training=False)
probabilities = tf.nn.softmax(logits)
# Create an operation that loads the pre-trained model from the checkpoint
init_fn = slim.assign_from_checkpoint_fn(
os.path.join(checkpoints_dir, 'inception_v1.ckpt'),
slim.get_model_variables('InceptionV1')
)
print("Model defined")
#writer = tf.summary.FileWriter(logdir='../tensorflow.logdir/', graph=tf.get_default_graph())
#writer.flush()
if False:
# Read from the Web
from io import BytesIO
url = 'https://upload.wikimedia.org/wikipedia/commons/7/70/EnglishCockerSpaniel_simon.jpg'
image_string = urlopen(url).read()
im = plt.imread(BytesIO(image_string), format='jpg')
if False:
# Read from a file via a queue ==> brain damage in jupyter
#filename_queue = tf.train.string_input_producer( tf.train.match_filenames_once("./images/*.jpg") )
filename_queue = tf.train.string_input_producer( ['./images/cat-with-tongue_224x224.jpg'] )
#_ = filename_queue.dequeue() # Ditch the first value
image_reader = tf.WholeFileReader()
_, image_string = image_reader.read(filename_queue)
# Read from a file
im = plt.imread("./images/cat-with-tongue_224x224.jpg")
print(im.shape, im[0,0]) # (height, width, channels), (uint8, uint8, uint8)
def crop_middle_square_area(np_image):
h, w, _ = np_image.shape
h = int(h/2)
w = int(w/2)
if h>w:
return np_image[ h-w:h+w, : ]
return np_image[ :, w-h:w+h ]
im_sq = crop_middle_square_area(im)
im_sq.shape
# Now let's run the pre-trained model
with tf.Session() as sess:
# This is the loader 'op' we defined above
init_fn(sess)
# This is two ops : one merely loads the image from numpy,
# the other runs the network to get the class probabilities
np_image, np_probs = sess.run([numpyish_image, probabilities], feed_dict={input_image:im_sq})
# These are regular numpy operations
probs = np_probs[0, :]
sorted_inds = [i[0] for i in sorted(enumerate(-probs), key=lambda x:x[1])]
# And now plot out the results
plt.figure()
plt.imshow(np_image.astype(np.uint8))
plt.axis('off')
plt.show()
for i in range(5):
index = sorted_inds[i]
print('Probability %0.2f%% => [%s]' % (probs[index] * 100, imagenet_labels[index]))  # scale to percent
import os
classes = sorted( [ d for d in os.listdir(CLASS_DIR) if os.path.isdir("%s/%s" % (CLASS_DIR, d)) ] )
classes # Sorted for for consistency
train = dict(filepath=[], features=[], target=[])
with tf.Session() as sess:
# This is the loader 'op' we defined above
init_fn(sess)
print("Loaded pre-trained model")
t0 = time.time()
for class_i, directory in enumerate(classes):
for filename in os.listdir("%s/%s" % (CLASS_DIR, directory, )):
filepath = '%s/%s/%s' % (CLASS_DIR, directory, filename, )
if os.path.isdir(filepath): continue
im = plt.imread(filepath)
im_sq = crop_middle_square_area(im)
# This is two ops : one merely loads the image from numpy,
# the other runs the network to get the 'logit features'
rawim, np_logits = sess.run([numpyish_image, logits], feed_dict={input_image:im_sq})
train['filepath'].append(filepath)
train['features'].append(np_logits[0])
train['target'].append( class_i )
plt.figure()
plt.imshow(rawim.astype('uint8'))
plt.axis('off')
plt.text(320, 50, '{}'.format(filename), fontsize=14)
plt.text(320, 80, 'Train as class "{}"'.format(directory), fontsize=12)
print("DONE : %6.2f seconds each" %(float(time.time() - t0)/len(train),))
#train['features'][0]
from sklearn import svm
classifier = svm.LinearSVC()
classifier.fit(train['features'], train['target']) # learn from the data
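# Quick sanity check (illustrative, and optimistic since it scores the data the
# model was fit on): a low value here would signal a problem with the features.
print("Training accuracy : %.3f" % classifier.score(train['features'], train['target']))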
test_image_files = [f for f in os.listdir(CLASS_DIR) if not os.path.isdir("%s/%s" % (CLASS_DIR, f))]
with tf.Session() as sess:
# This is the loader 'op' we defined above
init_fn(sess)
print("Loaded pre-trained model")
t0 = time.time()
for filename in sorted(test_image_files):
im = plt.imread('%s/%s' % (CLASS_DIR,filename,))
im_sq = crop_middle_square_area(im)
# This is two ops : one merely loads the image from numpy,
# the other runs the network to get the class probabilities
rawim, np_logits = sess.run([numpyish_image, logits], feed_dict={input_image:im_sq})
prediction_i = classifier.predict([ np_logits[0] ])
decision = classifier.decision_function([ np_logits[0] ])
plt.figure()
plt.imshow(rawim.astype('uint8'))
plt.axis('off')
prediction = classes[ prediction_i[0] ]
plt.text(350, 50, '{} : Distance from boundary = {:5.2f}'.format(prediction, decision[0]), fontsize=20)
plt.text(350, 75, '{}'.format(filename), fontsize=14)
print("DONE : %6.2f seconds each" %(float(time.time() - t0)/len(test_image_files),))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Print all the numbers from 0 to 4
Step2: 4. Build a for loop that prints all even numbers lower than 237.
Step3: 5. Add up all the numbers in the list
Step4: 6. Add up only the numbers that are even
Step5: 7. Use a for loop to print Hello World 5 times in a row
Step6: 8. Write a program that finds all numbers between 2000 and 3200 that are divisible by 7 but not by 5. The result should be printed on a single line. Hint
Step7: 9. Write a for loop that converts the numbers in the following list from int to str.
Step8: 10. Now write a program that replaces every digit 4 with the letter A and every digit 5 with the letter B.
|
<ASSISTANT_TASK:>
Python Code:
primzweibissieben = [2, 3, 5, 7]
for prime in primzweibissieben:
print(prime)
for x in range(5):
print(x)
for x in range(3, 6):
print(x)
numbers = [
951, 402, 984, 651, 360, 69, 408, 319, 601, 485, 980, 507, 725, 547, 544,
615, 83, 165, 141, 501, 263, 617, 865, 575, 219, 390, 984, 592, 236, 105, 942, 941,
386, 462, 47, 418, 907, 344, 236, 375, 823, 566, 597, 978, 328, 615, 953, 345,
399, 162, 758, 219, 918, 237, 412, 566, 826, 248, 866, 950, 626, 949, 687, 217,
815, 67, 104, 58, 512, 24, 892, 894, 767, 553, 81, 379, 843, 831, 445, 742, 717,
958, 609, 842, 451, 688, 753, 854, 685, 93, 857, 440, 380, 126, 721, 328, 753, 470,
743, 527
]
# Your code goes here:
new_lst = []
for elem in numbers:
    if elem < 238 and elem % 2 == 0:
        new_lst.append(elem)
print(new_lst)
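# Equivalent one-liner using a list comprehension (illustrative, same result as the loop above):
new_lst_lc = [n for n in numbers if n < 238 and n % 2 == 0]
print(new_lst_lc)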
# Solution:
sum(numbers)
# Solution:
evennumber = []
for elem in numbers:
if elem % 2 == 0:
evennumber.append(elem)
sum(evennumber)
Satz = ['Hello World', 'Hello World','Hello World','Hello World','Hello World']
for elem in Satz:
print(elem)
# Solution
l=[]
for i in range(2000, 3201):
if (i % 7==0) and (i % 5!=0):
l.append(str(i))
print(','.join(l))
lst = range(45,99)
newlst = []
for i in lst:
i = str(i)
newlst.append(i)
print(newlst)
newnewlist = []
for elem in newlst:
if '4' in elem:
elem = elem.replace('4', 'A')
if '5' in elem:
elem = elem.replace('5', 'B')
newnewlist.append(elem)
newnewlist
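# Alternative (Python 3, illustrative): the same 4->A, 5->B replacement via str.maketrans/translate.
table = str.maketrans({'4': 'A', '5': 'B'})
[str(i).translate(table) for i in range(45, 99)]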
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Hello World!
Step2: The amazing thing here is that the %f line magic can be used in Python code...
Step3: line magic is compiled to a Python statement
Step4: The %f line magic is compiled into a Python statement, that's why. Some more explanation is given at the end of this page.
Step5: Now use the [for] loop to print a pyramid
Step6: 5. peforth to access the main name space directly
Step7: The reason we chose FORTH is its extreme flexibility. Now let's redefine the way peforth handles an unknown token, i.e. the x of the above example. Instead of alerting "Error! x unknown." we let it try to find the token in the Jupyter Notebook __main__ module object.
Step8: now test again
Step9: peforth seems to know 'x' and 'y' now, while it actually doesn't. This trick is very useful when we are studying, because we can investigate things the FORTH way.
Step10: 6. Cascading peforth exported functions
Step11: As observed from the above outputs, peforth.ok() and peforth.dictate() both return the peforth module object. This means that we can cascade these functions. The next example inspects the type of the 'star' command that we defined above
Step12: It's correctly a 'colon' word. The next example in Python code is actually doing the same thing as the above FORTH code
Step13: Where (') and
|
<ASSISTANT_TASK:>
Python Code:
import peforth
%f version .s
%f ." Hello World!" cr
# use %f line magic in a python code function definition,
def hi():
%f ." Hello World!" cr
# believe it or not, it works!
hi()
%f __main__ :> hi .source
%%f Nothing allowed before %%f except white spaces; everything in this line after %%f is ignored.
\ Demonstrating the peforth interpret mode [for]..[next] loop
5 [for] t@ . space [next] cr
\ For experienced FORTH users
\ Where t@ is like the FORTH word r@ but it fetches TIB stack instead of the traditional
\ FORTH return stack. Because TIB is the only resource that belongs to this interpreting
\ life cycle alone.
%%f
: star ." *" ;
: 2stars star star ;
: stars for star next ;
star cr 2stars cr 10 [for] 13 t@ - stars cr [next]
x = 123
%f x . cr
%%f Now we redefine the 'unknown' command (it does nothing by default)
: unknown // ( token -- thing y|n) Try to find the unknown token in __main__
py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn")
py> str(tos())=="Ûnknôwn" if drop false else true then ;
\ here after, when FORTH come accross an unknown token, instead of printing the
\ error message, it try to find the token in python __main__ module name space.
y = 'abc'
%f y . cr
%f x . cr
# Run this cell to enter peforth console (REPL loop or command line interface)
# Note the Out[ ] of this cell, the `[*]` indicates that the command line interface
# is running. Play with it or copy-paste this line:
# "star cr 2stars cr 10 [for] 13 t@ - stars cr [next] exit"
# to try again the pyramid example above. 'exit' command to terminate.
peforth.ok()
peforth.dictate(" .' hello world!' cr ") # Note the Out[ ] of this cell
%%f
' star \ get the word object, we defined 'star' above remember?
:> type \ get 'type' attribute of the word object
. cr \ show what we have got ... it is a 'colon' word, isn't it?
# Example of cascaded functions, to check a given FORTH word's type
type_of_star = peforth.push('star').dictate("(') :> type").pop() # cascaded functions
print(type_of_star)
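# The same lookup written step by step, using only the push/dictate/pop calls shown above:
peforth.push('star')
peforth.dictate("(') :> type")
print(peforth.pop())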
%f help (')
%f help :>
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
Step2: Preprocessing
Step3: And here I'm creating dictionaries to convert words to integers and back, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.
Step4: Subsampling
Step5: Making batches
Step6: Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function by the way, helps save memory.
Step7: Building the graph
Step8: Embedding
Step9: Negative sampling
Step10: Validation
Step11: Training
Step12: Restore the trained network if you need to
Step13: Visualizing the word vectors
|
<ASSISTANT_TASK:>
Python Code:
import time
import numpy as np
import tensorflow as tf
import utils
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(dataset_filename):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
urlretrieve(
'http://mattmahoney.net/dc/text8.zip',
dataset_filename,
pbar.hook)
if not isdir(dataset_folder_path):
with zipfile.ZipFile(dataset_filename) as zip_ref:
zip_ref.extractall(dataset_folder_path)
with open('data/text8') as f:
text = f.read()
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
np.random.uniform?
# Reference: http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/
int_words[:10]
## Your code here
from collections import Counter
import random
count = Counter(int_words)
tot = sum(count.values())  # note: sum(count) would sum the word ids (the keys), not their counts
freq = {k: v / float(tot) for k, v in count.items()}
np.random.seed(632)
t = 1e-5
p_discard = {word: 1 - np.sqrt(t / freq[word]) for word in freq}
train_words = [word for word in int_words if random.random() < (1 - p_discard[word])]
len(train_words)
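# Rough sanity check on the subsampling step (illustrative): fraction of tokens kept.
print("kept {:.1f}% of {} tokens".format(100.0 * len(train_words) / len(int_words), len(int_words)))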
random.sample?
random.sample(range(1, 6), 1)
def get_target(words, idx, window_size=5):
''' Get a list of words in a window around an index. '''
# Your code here
    new_size = random.sample(range(1, window_size + 1), 1)[0]  # random window radius
    left_index = max(idx - new_size, 0)
    right_index = idx + new_size + 1
    # exclude the input word itself at position idx
    return words[left_index:idx] + words[idx + 1:right_index]
# # test
# words = [1, 2, 3, 4, 5, 6, 7, 8]
# idx = 1
# window_size = 4
# get_target(words, idx, window_size)
def get_batches(words, batch_size, window_size=5):
''' Create a generator of word batches as a tuple (inputs, targets) '''
n_batches = len(words)//batch_size
# only full batches
words = words[:n_batches*batch_size]
for idx in range(0, len(words), batch_size):
x, y = [], []
batch = words[idx:idx+batch_size]
for ii in range(len(batch)):
batch_x = batch[ii]
batch_y = get_target(batch, ii, window_size)
y.extend(batch_y)
x.extend([batch_x]*len(batch_y))
yield x, y
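# Illustrative check of the generator on a toy "word" list (not part of training):
demo_x, demo_y = next(get_batches(list(range(20)), batch_size=10, window_size=2))
print(demo_x[:8])
print(demo_y[:8])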
tf.reset_default_graph()
train_graph = tf.Graph()
with train_graph.as_default():
inputs = tf.placeholder(tf.int32, [None], name='inputs')
labels = tf.placeholder(tf.int32, [None, None], name='labels')
n_vocab = len(int_to_vocab)
n_embedding = 300# Number of embedding features
with train_graph.as_default():
embedding = tf.Variable(tf.random_uniform([n_vocab, n_embedding], minval=-1, maxval=1))# create embedding weight matrix here
embed = tf.nn.embedding_lookup(embedding, inputs)# use tf.nn.embedding_lookup to get the hidden layer output
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
softmax_w = tf.Variable(tf.truncated_normal([n_vocab, n_embedding], stddev=0.1))# create softmax weight matrix here
softmax_b = tf.Variable(tf.zeros(n_vocab))# create softmax biases here
# Calculate the loss using negative sampling
loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b, labels, embed, n_sampled, n_vocab)
cost = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer().minimize(cost)
with train_graph.as_default():
## From Thushan Ganegedara's implementation
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100
# pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,
random.sample(range(1000,1000+valid_window), valid_size//2))
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist:
!mkdir checkpoints
epochs = 10
batch_size = 1000
window_size = 10
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
for e in range(1, epochs+1):
batches = get_batches(train_words, batch_size, window_size)
start = time.time()
for x, y in batches:
feed = {inputs: x,
labels: np.array(y)[:, None]}
train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
loss += train_loss
if iteration % 100 == 0:
end = time.time()
print("Epoch {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Avg. Training loss: {:.4f}".format(loss/100),
"{:.4f} sec/batch".format((end-start)/100))
loss = 0
start = time.time()
if iteration % 1000 == 0:
## From Thushan Ganegedara's implementation
# note that this is expensive (~20% slowdown if computed every 500 steps)
sim = similarity.eval()
for i in range(valid_size):
valid_word = int_to_vocab[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = int_to_vocab[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
iteration += 1
save_path = saver.save(sess, "checkpoints/text8.ckpt")
embed_mat = sess.run(normalized_embedding)
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
embed_mat = sess.run(embedding)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Character counting and entropy
Step4: The entropy is a quantitative measure of the disorder of a probability distribution. It is used extensively in Physics, Statistics, Machine Learning, Computer Science and Information Science. Given a set of probabilities $P_i$, the entropy is defined as $H = -\sum_i P_i \log_2(P_i)$
Step5: Use IPython's interact function to create a user interface that allows you to type a string into a text box and see the entropy of the character probabilities of the string.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact
def char_probs(s):
    """Find the probabilities of the unique characters in the string s.

    Parameters
    ----------
    s : str
        A string of characters.

    Returns
    -------
    probs : dict
        A dictionary whose keys are the unique characters in s and whose values
        are the probabilities of those characters.
    """
    n = float(len(s))
    return {c: s.count(c) / n for c in set(s)}
test1 = char_probs('aaaa')
assert np.allclose(test1['a'], 1.0)
test2 = char_probs('aabb')
assert np.allclose(test2['a'], 0.5)
assert np.allclose(test2['b'], 0.5)
test3 = char_probs('abcd')
assert np.allclose(test3['a'], 0.25)
assert np.allclose(test3['b'], 0.25)
assert np.allclose(test3['c'], 0.25)
assert np.allclose(test3['d'], 0.25)
def entropy(d):
    """Compute the entropy of a dict d whose values are probabilities."""
    probs = np.array(list(d.values()))
    return -np.sum(probs * np.log2(probs))
assert np.allclose(entropy({'a': 0.5, 'b': 0.5}), 1.0)
assert np.allclose(entropy({'a': 1.0}), 0.0)
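# Worked example: entropy of the character distribution of a short string.
print(entropy(char_probs('hello world')))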
def print_entropy(s='type a string here'):
    print(entropy(char_probs(s)))
interact(print_entropy, s='type a string here')
assert True # use this for grading the pi digits histogram
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we need to build a function that takes distance, base and value as parameters and returns the SLM, STDM, and Visualize Cluster Matrix plots.
Step2: Now we build the widget for each exploration
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.image as mpimg
# Widgets library
from ipywidgets import interact
%matplotlib inline
# We need to load all the files here
# Load the file
folder = '../results/'
name = 'parameter_swep_SLM-0.00-0.00-10.00.png'
file_name = folder + name
image = mpimg.imread(file_name)
# Now let's plot it
figsize = (16, 12)
figure = plt.figure(figsize=figsize)
ax = figure.add_subplot(1, 1, 1)
ax.set_axis_off()
ax.imshow(image)
def load_SLM(base, distance, value):
# Load the image
folder = '../results/'
name = 'parameter_swep_SLM'
parameter_marker = '-{0:4.2f}-{1:4.2f}-{2:4.2f}'.format(base, distance, value)
file_name = folder + name + parameter_marker + '.png'
image = mpimg.imread(file_name)
# Plot
figsize = (16, 12)
figure = plt.figure(figsize=figsize)
ax = figure.add_subplot(1, 1, 1)
ax.set_axis_off()
ax.imshow(image)
def load_STDM(base, distance, value):
folder = '../results/'
name = 'parameter_swep_STDM'
parameter_marker = '-{0:5.2f}-{1:5.2f}-{2:5.2f}'.format(base, distance, value)
file_name = folder + name + parameter_marker + '.png'
image = mpimg.imread(file_name)
# Plot
figsize = (16, 12)
figure = plt.figure(figsize=figsize)
ax = figure.add_subplot(1, 1, 1)
ax.set_axis_off()
ax.imshow(image)
def load_cluster(base, distance, value):
folder = '../results/'
name = 'parameter_swep_cluster'
parameter_marker = '-{0:5.2f}-{1:5.2f}-{2:5.2f}'.format(base, distance, value)
file_name = folder + name + parameter_marker + '.png'
image = mpimg.imread(file_name)
# Plot
figsize = (16, 12)
figure = plt.figure(figsize=figsize)
ax = figure.add_subplot(1, 1, 1)
ax.set_axis_off()
ax.imshow(image)
def load_cluster_SLM(base, distance, value):
folder = '../results/'
name = 'parameter_swep_cluster_SLM'
parameter_marker = '-{0:5.2f}-{1:5.2f}-{2:5.2f}'.format(base, distance, value)
file_name = folder + name + parameter_marker + '.png'
image = mpimg.imread(file_name)
# Plot
figsize = (16, 12)
figure = plt.figure(figsize=figsize)
ax = figure.add_subplot(1, 1, 1)
ax.set_axis_off()
ax.imshow(image)
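# A more compact alternative (a sketch, not used by the widgets below): one loader
# parameterized by the plot name, assuming the folder layout and the '{:5.2f}'
# filename convention used above.
def load_image(name, base, distance, value):
    marker = '-{0:5.2f}-{1:5.2f}-{2:5.2f}'.format(base, distance, value)
    image = mpimg.imread('../results/parameter_swep_' + name + marker + '.png')
    figure = plt.figure(figsize=(16, 12))
    ax = figure.add_subplot(1, 1, 1)
    ax.set_axis_off()
    ax.imshow(image)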
interact(load_SLM, base=(0, 200, 40), distance=(0, 601, 40), value=(10, 200, 38))
interact(load_STDM, base=(0, 200, 40), distance=(0, 601, 40), value=(10, 200, 38))
interact(load_cluster, base=(0, 200, 40), distance=(0, 601, 40), value=(10, 200, 38))
interact(load_cluster_SLM, base=(0, 200, 40), distance=(0, 601, 40), value=(10, 200, 38))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here we display the most basic statistics for each of the MVP candidates, such as points, assists, steals and rebounds per game. As we can see, Westbrook had some of the highest totals in these categories. Westbrook was only the second player in NBA history to average double-digit numbers in points, rebounds and assists. Many believe that this fact alone should award him the title of MVP. However, it is important to note that players renowned for their defense, such as Kawhi Leonard, aren't usually the leaders in these categories, so these statistics can paint an incomplete picture of how good a player is.
Step2: Player efficiency rating (PER) is a statistic meant to capture all aspects of a player's game to give a measure of overall performance. It is adjusted for pace and minutes, and the league average is always 15.0 for comparison. Russell Westbrook leads all MVP candidates with a PER of 30.6. All candidates just about meet or surpass the historical MVP average of 27.42. However, PER is a little flawed, as it is much more heavily weighted toward offensive statistics: it only takes blocks and steals into account on the defensive side. This favors Westbrook and Harden, who put up stronger offensive numbers than Leonard and James. On the other hand, Westbrook and Harden are not known for being great defenders, while James, and especially Kawhi Leonard (a two-time Defensive Player of the Year winner), are two of the top defenders in the NBA.
Step3: According to Basketball Reference, Value over Replacement Player (VORP) provides an "estimate of each player's overall contribution to the team, measured vs. what a theoretical 'replacement player' would provide", where the 'replacement player' is defined as a player with a box plus/minus of -2. By this metric, Russell Westbrook contributes the most to his team, with a VORP of 12.4. Westbrook and James Harden are the only candidates with a VORP above the historical MVP average of 7.62.
Step4: Win Shares is a measure of the wins a player produces for his team. The statistic is calculated by estimating how many wins a player has contributed to his team through both his offensive and defensive play.
Step5: Here we compare the defensive production of each of the MVP candidates. Defensive Win Shares (DWS) is calculated by estimating how a player's defensive production translates into wins for his team. A player's estimated points allowed per 100 possessions, marginal defense added, and points added in a win are all taken into account. Because points added in a win enters the calculation, there is still some offensive bias in what is supposed to be a purely defensive statistic, so players who score more points and win more games can get higher values. Despite these possible flaws, we see that Leonard and Westbrook lead the way with DWS of 4.7 and 4.6 respectively. All players still fall short of the historical MVP average of 5.1.
Step6: Win Shares per 48 Minutes (WS/48) is another statistic used to measure the wins attributed to a certain player. It differs in that, instead of just counting wins over the course of a season, it controls for the actual minutes the player was on the floor. Here we see that Kawhi Leonard, not Harden, has the highest WS/48. We believe this is because Leonard plays significantly fewer minutes than the other candidates. Leonard is the only player whose WS/48 of .264 surpasses the historical MVP average of .261.
Step7: Usage percentage measures the share of team possessions a player uses per game. A higher percentage means a player handles the ball more. A high usage percentage for one player can lower the team's overall efficiency, as the offense runs through that player more. In this case, Russell Westbrook's usage percentage is considerably higher than the other candidates' and is the highest usage percentage in NBA history by about 3%. The other candidates are much closer to the historical average MVP usage percentage of 29.77%.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd # data package
import matplotlib.pyplot as plt # graphics
import datetime as dt # date tools, used to note current date
import requests
from bs4 import BeautifulSoup
import urllib.request
from matplotlib.offsetbox import OffsetImage
%matplotlib inline
#per game statistics for MVP candidates
url = 'http://www.basketball-reference.com/play-index/pcm_finder.fcgi?request=1&sum=0&player_id1_hint=James+Harden&player_id1_select=James+Harden&player_id1=hardeja01&y1=2017&player_id2_hint=LeBron+James&player_id2_select=LeBron+James&y2=2017&player_id2=jamesle01&player_id3_hint=Kawhi+Leonard&player_id3_select=Kawhi+Leonard&y3=2017&player_id3=leonaka01&player_id4_hint=Russell+Westbrook&player_id4_select=Russell+Westbrook&y4=2017&player_id4=westbru01'
cl = requests.get(url)
soup = BeautifulSoup(cl.content, 'html.parser')
column_headers = [th.getText() for th in
soup.findAll('tr')[0].findAll('th')]
data_rows = soup.findAll('tr')[1:]
player_data = [[td.getText() for td in data_rows[i].findAll('td')]
for i in range(len(data_rows))]
df = pd.DataFrame(player_data, columns=column_headers[1:])
df = df.set_index('Player')
df = df.sort_index(ascending = True)
#getting advanced statistics for MVP candidates
url1 = 'http://www.basketball-reference.com/play-index/psl_finder.cgi?request=1&match=single&per_minute_base=36&per_poss_base=100&type=advanced&season_start=1&season_end=-1&lg_id=NBA&age_min=0&age_max=99&is_playoffs=N&height_min=0&height_max=99&year_min=2017&year_max=2017&birth_country_is=Y&as_comp=gt&pos_is_g=Y&pos_is_gf=Y&pos_is_f=Y&pos_is_fg=Y&pos_is_fc=Y&pos_is_c=Y&pos_is_cf=Y&force%3Apos_is=1&c6mult=1.0&order_by=ws'
bl = requests.get(url1)
soup1 = BeautifulSoup(bl.content, 'html.parser')
column_headers_adv = [th.getText() for th in
soup1.findAll('tr')[1].findAll('th')]
data_rows_adv = soup1.findAll('tr')[2:8]
player_data_adv = [[td.getText() for td in data_rows_adv[i].findAll('td')]
for i in range(len(data_rows_adv))]
df_adv = pd.DataFrame(player_data_adv, columns=column_headers_adv[1:])
df_adv = df_adv.set_index('Player')
#drop other players from list
df_adv = df_adv.drop(['Rudy Gobert', 'Jimmy Butler'])
#sort players alphabetically
df_adv = df_adv.sort_index(ascending = True)
#drop duplicate and unnecessary columns
df_adv = df_adv.drop(['Season', 'Age', 'Tm', 'Lg', 'G', 'GS', 'MP'], axis=1)
#combined table of per game and andvanced statistics
MVP = pd.concat([df, df_adv], axis=1)
MVP
#convert to proper dtypes
MVP = MVP.apply(pd.to_numeric, errors='ignore')
#get per game statistics for MVP winners since 1980
url2 = 'http://www.basketball-reference.com/play-index/psl_finder.cgi?request=1&match=single&type=per_game&per_minute_base=36&per_poss_base=100&season_start=1&season_end=-1&lg_id=NBA&age_min=0&age_max=99&is_playoffs=N&height_min=0&height_max=99&year_min=1981&year_max=2017&birth_country_is=Y&as_comp=gt&pos_is_g=Y&pos_is_gf=Y&pos_is_f=Y&pos_is_fg=Y&pos_is_fc=Y&pos_is_c=Y&pos_is_cf=Y&force%3Apos_is=1&award=mvp&c6mult=1.0&order_by=season'
al = requests.get(url2)
soup2 = BeautifulSoup(al.content, 'html.parser')
column_headers_past = [th.getText() for th in
soup2.findAll('tr')[1].findAll('th')]
data_rows_past = soup2.findAll('tr')[2:]
player_data_past = [[td.getText() for td in data_rows_past[i].findAll('td')]
for i in range(len(data_rows_past))]
df_past = pd.DataFrame(player_data_past, columns=column_headers_past[1:])
df_past = df_past.set_index('Player')
df_past = df_past.drop(['Tm', 'Lg'], axis=1)
#drop row of null values, which was used to separate decades on the Basketball Reference website
df_past = df_past.dropna(axis=0)
#get advanced statistics for MVP winners since 1980
url3 = 'http://www.basketball-reference.com/play-index/psl_finder.cgi?request=1&match=single&per_minute_base=36&per_poss_base=100&type=advanced&season_start=1&season_end=-1&lg_id=NBA&age_min=0&age_max=99&is_playoffs=N&height_min=0&height_max=99&year_min=1981&year_max=2017&birth_country_is=Y&as_comp=gt&pos_is_g=Y&pos_is_gf=Y&pos_is_f=Y&pos_is_fg=Y&pos_is_fc=Y&pos_is_c=Y&pos_is_cf=Y&force%3Apos_is=1&award=mvp&c6mult=1.0&order_by=season'
dl = requests.get(url3)
soup3 = BeautifulSoup(dl.content, 'html.parser')
column_headers_past_adv = [th.getText() for th in
soup3.findAll('tr')[1].findAll('th')]
data_rows_past_adv = soup3.findAll('tr')[2:]
player_data_past_adv = [[td.getText() for td in data_rows_past_adv[i].findAll('td')]
for i in range(len(data_rows_past_adv))]
df_past_adv = pd.DataFrame(player_data_past_adv, columns=column_headers_past_adv[1:])
df_past_adv = df_past_adv.set_index('Player')
#drop duplicate and unnecessary columns
df_past_adv = df_past_adv.drop(['Age', 'Tm', 'Lg', 'Season', 'G', 'GS', 'MP'], axis=1)
#drop row of null values
df_past_adv = df_past_adv.dropna(axis=0)
historical = pd.concat([df_past, df_past_adv], axis=1)
historical
#convert to proper data types
historical = historical.apply(pd.to_numeric, errors='ignore')
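# Hedged comparison (illustrative): each candidate's key advanced stats as a ratio to the
# historical MVP average (1.0 = matches the average past MVP). Column names as used above.
key_stats = ['PER', 'WS', 'WS/48', 'VORP', 'USG%']
(MVP[key_stats] / historical[key_stats].mean()).round(2)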
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize = (12,12), sharex=True, sharey=False)
MVP['PTS'].plot.bar(ax=axes[0,0], color = ['b', 'b', 'b', 'r']); axes[0,0].set_title('Points per Game')
MVP['eFG%'].plot.bar(ax=axes[1,0], color = ['b', 'b', 'r', 'b']); axes[1,0].set_title('Effective Field Goal Percentage')
MVP['AST'].plot.bar(ax=axes[0,1], color = ['r', 'b', 'b', 'b']); axes[0,1].set_title('Assists per Game')
MVP['TRB'].plot.bar(ax=axes[1,1], color = ['b', 'b', 'b', 'r']); axes[1,1].set_title('Rebounds per Game')
import seaborn as sns
fig, ax = plt.subplots()
MVP['PER'].plot(ax=ax, kind = 'bar', color = ['b', 'b', 'b', 'r'])
ax.set_ylabel('PER')
ax.set_xlabel('')
ax.axhline(historical['PER'].mean(), color = 'k', linestyle = '--', alpha = .4)
fig, ax = plt.subplots()
MVP['VORP'].plot(ax=ax, kind = 'bar', color = ['b', 'b', 'b', 'r'])
ax.set_ylabel('Value Over Replacement Player')
ax.set_xlabel('')
ax.axhline(historical['VORP'].mean(), color = 'k', linestyle = '--', alpha = .4)
fig, ax = plt.subplots()
MVP['WS'].plot(ax=ax, kind = 'bar', color = ['r', 'b', 'b', 'b'])
ax.set_ylabel('Win Shares')
ax.set_xlabel('')
ax.axhline(historical['WS'].mean(), color = 'k', linestyle = '--', alpha = .4)
fig, ax = plt.subplots()
MVP['DWS'].plot(ax=ax, kind = 'bar', color = ['b', 'r', 'b', 'b'])
ax.set_ylabel('Defensive Win Share')
ax.set_xlabel('')
ax.axhline(historical['DWS'].mean(), color = 'k', linestyle = '--', alpha = .4)
fig, ax = plt.subplots()
MVP['WS/48'].plot(ax=ax, kind = 'bar', color = ['b', 'r', 'b', 'b'])
ax.set_ylabel('Win Share/48 Minutes')
ax.set_xlabel('')
ax.axhline(historical['WS/48'].mean(), color = 'k', linestyle = '--', alpha = .4)
print(historical['WS/48'].mean())
fig, ax = plt.subplots()
MVP['USG%'].plot(ax=ax, kind = 'bar', color = ['b', 'b', 'b', 'r'])
ax.set_ylabel('Usage Percentage')
ax.set_xlabel('')
ax.axhline(historical['USG%'].mean(), color = 'k', linestyle = '--', alpha = .4)
print(historical['USG%'].mean())
url4 ='http://www.basketball-reference.com/play-index/tsl_finder.cgi?request=1&match=single&type=team_totals&lg_id=NBA&year_min=2017&year_max=2017&order_by=wins'
e1 = requests.get(url4)
soup4 = BeautifulSoup(e1.content, 'html.parser')
column_headers_team = [th.getText() for th in
soup4.findAll('tr')[1].findAll('th')]
data_rows_team = soup4.findAll('tr')[3:12]
team_wins = [[td.getText() for td in data_rows_team[i].findAll('td')]
for i in range(len(data_rows_team))]
df_team = pd.DataFrame(team_wins, columns=column_headers_team[1:])
df_team = df_team.set_index('Tm')
df_team =df_team.drop(['TOR*','UTA*','LAC*','WAS*'])
Team =df_team
Team
Team['W']['SAS*']
Hou_wins = int((Team['W']['HOU*']))
Harden_Wins = int(MVP['WS']['James Harden'])
Harden_winpct = Harden_Wins/Hou_wins
Harden_nonwin = 1 - Harden_winpct
SAS_wins = int((Team['W']['SAS*']))
Leo_Wins = int(MVP['WS']['Kawhi Leonard'])
Leo_winpct = Leo_Wins/SAS_wins
Leo_nonwin = 1 - Leo_winpct
Cle_wins = int((Team['W']['CLE*']))
LeBron_Wins = int(MVP['WS']['LeBron James'])
LeBron_winpct = LeBron_Wins/Cle_wins
LeBron_nonwin = 1 - LeBron_winpct
OKC_wins = int((Team['W']['OKC*']))
Westbrook_Wins = int(MVP['WS']['Russell Westbrook'])
Westbrook_winpct = Westbrook_Wins/OKC_wins
Westbrook_nonwin = 1 - Westbrook_winpct
df1 = ([Harden_winpct, Leo_winpct, LeBron_winpct, Westbrook_winpct])
df2 = ([Harden_nonwin, Leo_nonwin, LeBron_nonwin, Westbrook_nonwin])
df3 = pd.DataFrame(df1)
df4 = pd.DataFrame(df2)
Win_Share_Per = pd.concat([df3, df4], axis =1)
Win_Share_Per.columns = ['% Wins Accounted For', 'Rest of Team']
Win_Share_Per = Win_Share_Per.T
Win_Share_Per.columns = ['James Harden', 'Kawhi Leonard', 'LeBron James', 'Russell Westbrook']
pic1 = urllib.request.urlretrieve("http://stats.nba.com/media/players/230x185/201935.png", "201935.png")
pic2 = urllib.request.urlretrieve("http://stats.nba.com/media/players/230x185/202695.png", "202695.png")
pic3 = urllib.request.urlretrieve("http://stats.nba.com/media/players/230x185/2544.png", "2544.png")
pic4 = urllib.request.urlretrieve("http://stats.nba.com/media/players/230x185/201566.png", "201566.png")
pic5 = urllib.request.urlretrieve("https://upload.wikimedia.org/wikipedia/en/thumb/2/28/Houston_Rockets.svg/410px-Houston_Rockets.svg.png", "410px-Houston_Rockets.svg.png")
pic6 = urllib.request.urlretrieve("https://upload.wikimedia.org/wikipedia/en/thumb/a/a2/San_Antonio_Spurs.svg/512px-San_Antonio_Spurs.svg.png", "512px-San_Antonio_Spurs.svg.png")
pic7 = urllib.request.urlretrieve("https://upload.wikimedia.org/wikipedia/en/thumb/f/f7/Cleveland_Cavaliers_2010.svg/295px-Cleveland_Cavaliers_2010.svg.png", "295px-Cleveland_Cavaliers_2010.svg.png")
pic8 = urllib.request.urlretrieve("https://upload.wikimedia.org/wikipedia/en/thumb/5/5d/Oklahoma_City_Thunder.svg/250px-Oklahoma_City_Thunder.svg.png", "250px-Oklahoma_City_Thunder.svg.png")
harden_pic = plt.imread(pic1[0])
leonard_pic = plt.imread(pic2[0])
james_pic = plt.imread(pic3[0])
westbrook_pic = plt.imread(pic4[0])
rockets_pic = plt.imread(pic5[0])
spurs_pic = plt.imread(pic6[0])
cavaliers_pic = plt.imread(pic7[0])
thunder_pic = plt.imread(pic8[0])
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize = (12,12))
Win_Share_Per['James Harden'].plot.pie(ax=axes[0,0], colors = ['r', 'yellow'])
Win_Share_Per['Kawhi Leonard'].plot.pie(ax=axes[0,1], colors = ['black', 'silver'])
Win_Share_Per['LeBron James'].plot.pie(ax=axes[1,0], colors = ['maroon', 'navy'])
Win_Share_Per['Russell Westbrook'].plot.pie(ax=axes[1,1], colors = ['blue', 'orangered'])
img1 = OffsetImage(harden_pic, zoom=0.4)
img1.set_offset((290,800))
a = axes[0,0].add_artist(img1)
a.set_zorder(10)
img2 = OffsetImage(leonard_pic, zoom=0.4)
img2.set_offset((800,800))
b= axes[0,1].add_artist(img2)
b.set_zorder(10)
img3 = OffsetImage(james_pic, zoom=0.4)
img3.set_offset((290,290))
c = axes[1,0].add_artist(img3)
c.set_zorder(10)
img4 = OffsetImage(westbrook_pic, zoom=0.4)
img4.set_offset((790,290))
d = axes[1,1].add_artist(img4)
d.set_zorder(10)
img5 = OffsetImage(rockets_pic, zoom=0.4)
img5.set_offset((150,620))
e = axes[1,1].add_artist(img5)
e.set_zorder(10)
img6 = OffsetImage(spurs_pic, zoom=0.3)
img6.set_offset((650,620))
f = axes[1,1].add_artist(img6)
f.set_zorder(10)
img7 = OffsetImage(cavaliers_pic, zoom=0.4)
img7.set_offset((150,130))
g = axes[1,1].add_artist(img7)
g.set_zorder(10)
img8 = OffsetImage(thunder_pic, zoom=0.4)
img8.set_offset((650,130))
h = axes[1,1].add_artist(img8)
h.set_zorder(10)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 0 - hyperparams
Step2: Step 1 - collect data (and/or generate them)
Step3: Step 2 - Build model
Step4: Step 3 training the network
Step5: Conclusion
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import tensorflow as tf
from os import path
import numpy as np
import pandas as pd
import csv
from sklearn.model_selection import StratifiedShuffleSplit
from time import time
from matplotlib import pyplot as plt
import seaborn as sns
from mylibs.jupyter_notebook_helper import show_graph
from tensorflow.contrib import rnn
from tensorflow.contrib import learn
import shutil
from tensorflow.contrib.learn.python.learn import learn_runner
from IPython.display import Image
from IPython.core.display import HTML
from mylibs.tf_helper import getDefaultGPUconfig
from data_providers.binary_shifter_varlen_data_provider import \
BinaryShifterVarLenDataProvider
from data_providers.price_history_varlen_data_provider import PriceHistoryVarLenDataProvider
from models.model_05_price_history_rnn_varlen import PriceHistoryRnnVarlen
from sklearn.metrics import r2_score
from mylibs.py_helper import factors
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
from statsmodels.tsa.stattools import coint
dtype = tf.float32
seed = 16011984
random_state = np.random.RandomState(seed=seed)
config = getDefaultGPUconfig()
%matplotlib inline
from common import get_or_run_nn
num_epochs = 10
series_max_len = 60
num_features = 1 #just one here, the function we are predicting is one-dimensional
state_size = 400
target_len = 30
batch_size = 47
csv_in = '../price_history_03a_fixed_width.csv'
npz_path = '../price_history_03_dp_60to30_from_fixed_len.npz'
# XX, YY, sequence_lens, seq_mask = PriceHistoryVarLenDataProvider.createAndSaveDataset(
# csv_in=csv_in,
# npz_out=npz_path,
# input_seq_len=60, target_seq_len=30)
# XX.shape, YY.shape, sequence_lens.shape, seq_mask.shape
dp = PriceHistoryVarLenDataProvider(filteringSeqLens = lambda xx : xx >= target_len,
npz_path=npz_path)
dp.inputs.shape, dp.targets.shape, dp.sequence_lengths.shape, dp.sequence_masks.shape
model = PriceHistoryRnnVarlen(rng=random_state, dtype=dtype, config=config)
graph = model.getGraph(batch_size=batch_size, state_size=state_size,
rnn_cell= PriceHistoryRnnVarlen.RNN_CELLS.GRU,
target_len=target_len, series_max_len=series_max_len)
show_graph(graph)
rnn_cell = PriceHistoryRnnVarlen.RNN_CELLS.GRU
num_epochs, state_size, batch_size
def experiment():
dynStats, predictions_dict = model.run(epochs=num_epochs,
rnn_cell=rnn_cell,
state_size=state_size,
series_max_len=series_max_len,
target_len=target_len,
npz_path=npz_path,
batch_size=batch_size)
return dynStats, predictions_dict
dyn_stats, preds_dict = get_or_run_nn(experiment,
filename='002_rnn_gru_60to30')
dyn_stats.plotStats()
plt.show()
r2_scores = [r2_score(y_true=dp.targets[ind], y_pred=preds_dict[ind])
for ind in range(len(dp.targets))]
ind = np.argmin(r2_scores)
ind
reals = dp.targets[ind]
preds = preds_dict[ind]
r2_score(y_true=reals, y_pred=preds)
sns.tsplot(data=dp.inputs[ind].flatten())
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
%%time
dtw_scores = [fastdtw(dp.targets[ind], preds_dict[ind])[0]
for ind in range(len(dp.targets))]
np.mean(dtw_scores)
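# Distribution of the DTW scores, to complement the mean reported above:
plt.hist(dtw_scores, bins=50)
plt.xlabel('DTW distance')
plt.show()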
coint(preds, reals)
cur_ind = np.random.randint(len(dp.targets))
reals = dp.targets[cur_ind]
preds = preds_dict[cur_ind]
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
rnn_cell = PriceHistoryRnnVarlen.RNN_CELLS.GRU
num_epochs = 50
state_size, batch_size
def experiment():
dynStats, predictions_dict = model.run(epochs=num_epochs,
rnn_cell=rnn_cell,
state_size=state_size,
series_max_len=series_max_len,
target_len=target_len,
npz_path=npz_path,
batch_size=batch_size)
return dynStats, predictions_dict
dyn_stats, preds_dict = get_or_run_nn(experiment,
filename='002_rnn_gru_60to30_50epochs')
dyn_stats.plotStats()
plt.show()
r2_scores = [r2_score(y_true=dp.targets[ind], y_pred=preds_dict[ind])
for ind in range(len(dp.targets))]
ind = np.argmin(r2_scores)
ind
reals = dp.targets[ind]
preds = preds_dict[ind]
r2_score(y_true=reals, y_pred=preds)
sns.tsplot(data=dp.inputs[ind].flatten())
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
%%time
dtw_scores = [fastdtw(dp.targets[ind], preds_dict[ind])[0]
for ind in range(len(dp.targets))]
np.mean(dtw_scores)
coint(preds, reals)
cur_ind = np.random.randint(len(dp.targets))
reals = dp.targets[cur_ind]
preds = preds_dict[cur_ind]
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step9: Functions
Step10: Data
Step11: Clean
Step12: Features
Step13: Split the training data
Step14: tfidf
Step15: Combine
Step16: Training
Step17: Testing on dev
Step18: Test Data
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import regexp_tokenize
from nltk.stem.porter import PorterStemmer
from sklearn import cross_validation
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
plt.style.use('ggplot')
def sample(df, n=1000, include_cats=[2, 3, 4, 5, 6, 7], random_state=1868):
    """Take a random sample of size `n` for categories
    in `include_cats`.
    """
df = df.copy()
subset = df[df.Category.isin(include_cats)]
sample = subset.sample(n, random_state=random_state)
return sample
def clean_text(df, col):
    """A function for keeping only alpha-numeric
    characters and replacing all white space with
    a single space.
    """
df = df.copy()
porter_stemmer = PorterStemmer()
return df[col].apply(lambda x: re.sub(';br&', ';&', x))\
.apply(lambda x: re.sub('&.+?;', '', x))\
.apply(lambda x: re.sub('[^A-Za-z0-9]+', ' ', x.lower()))\
.apply(lambda x: re.sub('\s+', ' ', x).strip())\
.apply(lambda x: ' '.join([porter_stemmer.stem(w)
for w in x.split()]))
def count_pattern(df, col, pattern):
    """Count the occurrences of `pattern`
    in df[col].
    """
df = df.copy()
return df[col].str.count(pattern)
def split_on_sentence(text):
    """Tokenize the text on sentences.

    Returns a list of strings (sentences).
    """
sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
return sent_tokenizer.tokenize(text)
def split_on_word(text):
    """Use regular expression tokenizer.
    Keep apostrophes.

    Returns a list of lists, one list for each sentence:
    [[word, word], [word, word, ..., word], ...].
    """
if type(text) is list:
return [regexp_tokenize(sentence, pattern="\w+(?:[-']\w+)*")
for sentence in text]
else:
return regexp_tokenize(text, pattern="\w+(?:[-']\w+)*")
def features(df):
    """Create the features in the specified DataFrame."""
stop_words = stopwords.words('english')
df = df.copy()
df['n_questionmarks'] = count_pattern(df, 'Text', '\?')
df['n_periods'] = count_pattern(df, 'Text', '\.')
df['n_apostrophes'] = count_pattern(df, 'Text', '\'')
df['n_the'] = count_pattern(df, 'Text', 'the ')
df['first_word'] = df.text_clean.apply(lambda x: split_on_word(x)[0])
question_words = ['what', 'how', 'why', 'is']
for w in question_words:
col_fw = 'fw_' + w
df[col_fw] = (df.first_word == w) * 1
del df['first_word']
df['n_words'] = df.text_clean.apply(lambda x: len(split_on_word(x)))
    # note: despite the name, this counts the tokens that are NOT stopwords
    df['n_stopwords'] = df.text_clean.apply(lambda x:
                                            len([w for w in split_on_word(x)
                                                 if w not in stop_words]))
df['n_first_person'] = df.text_clean.apply(lambda x:
sum([w in person_first
for w in x.split()]))
df['n_second_person'] = df.text_clean.apply(lambda x:
sum([w in person_second
for w in x.split()]))
df['n_third_person'] = df.text_clean.apply(lambda x:
sum([w in person_third
for w in x.split()]))
return df
def flatten_words(list1d, get_unique=False):
qa = [s.split() for s in list1d]
if get_unique:
return sorted(list(set([w for sent in qa for w in sent])))
else:
return [w for sent in qa for w in sent]
def tfidf_matrices(tr, te, col='text_clean'):
    """Returns tfidf matrices for both the
    training and test DataFrames.

    The matrices will have the same number of
    columns, which represent unique words, but
    not the same number of rows, which represent
    samples.
    """
tr = tr.copy()
te = te.copy()
text = tr[col].values.tolist() + te[col].values.tolist()
vocab = flatten_words(text, get_unique=True)
tfidf = TfidfVectorizer(stop_words='english', vocabulary=vocab)
tr_matrix = tfidf.fit_transform(tr.text_clean)
te_matrix = tfidf.fit_transform(te.text_clean)
return tr_matrix, te_matrix
def concat_tfidf(df, matrix):
df = df.copy()
df = pd.concat([df, pd.DataFrame(matrix.todense())], axis=1)
return df
def jitter(values, sd=0.25):
    """Jitter points for use in a scatterplot."""
return [np.random.normal(v, sd) for v in values]
person_first = ['i', 'we', 'me', 'us', 'my', 'mine', 'our', 'ours']
person_second = ['you', 'your', 'yours']
person_third = ['he', 'she', 'it', 'him', 'her', 'his', 'hers', 'its']
training = pd.read_csv('../data/newtrain.csv')
test = pd.read_csv('../data/newtest.csv')
training['text_clean'] = clean_text(training, 'Text')
test['text_clean'] = clean_text(test, 'Text')
training = features(training)
test = features(test)
train, dev = cross_validation.train_test_split(training, test_size=0.2, random_state=1868)
train = train.append(sample(train, n=800))
train.reset_index(drop=True, inplace=True)
dev.reset_index(drop=True, inplace=True)
train_matrix, dev_matrix = tfidf_matrices(train, dev)
train = concat_tfidf(train, train_matrix)
dev = concat_tfidf(dev, dev_matrix)
svm = LinearSVC(dual=False, max_iter=5000)
features = train.columns[3:]
X = train[features].values
y = train['Category'].values
features_dev = dev[features].values
svm.fit(X, y)
dev_predicted = svm.predict(features_dev)
accuracy_score(dev.Category, dev_predicted)
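# A per-class view of the dev-set errors (illustrative; confusion_matrix is from sklearn):
from sklearn.metrics import confusion_matrix
print(confusion_matrix(dev.Category, dev_predicted))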
plt.figure(figsize=(6, 5))
plt.scatter(jitter(dev.Category, 0.15),
jitter(dev_predicted, 0.15),
color='#348ABD', alpha=0.25)
plt.title('Support Vector Classifier\n')
plt.xlabel('Ground Truth')
plt.ylabel('Predicted')
training = training.append(sample(training, n=1200))
training.reset_index(drop=True, inplace=True)
training_matrix, test_matrix = tfidf_matrices(training, test)
training = concat_tfidf(training, training_matrix)
test = concat_tfidf(test, test_matrix)
features = training.columns[3:]
X = training[features].values
y = training['Category'].values
features_test = test[features].values
svm.fit(X, y)
test_predicted = svm.predict(features_test)
test['Category'] = test_predicted
output = test[['Id', 'Category']]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Create a Radical Pilot Session
Step2: 4. Create Pilot and Unit Managers
Step3: 5. Submit the pilot to the Pilot and Unit Managers
Step4: 6. Submit Compute Units
Step5: Submit the created Compute Units to the Unit Manager.
Step6: Printing the output of a Compute Unit
Step7: 7. Always clean up the session
|
<ASSISTANT_TASK:>
Python Code:
import os, sys
import radical.pilot as rp
import ast
import pandas as pd  # needed by print_details below
os.environ["RADICAL_PILOT_DBURL"]="mongodb://ec2-54-221-194-147.compute-1.amazonaws.com:24242/sc15tut"
os.environ["RADICAL_PILOT_VERBOSE"]="DEBUG"
def print_details(detail_object):
if type(detail_object)==str:
detail_object = ast.literal_eval(detail_object)
for i in detail_object:
detail_object[i]=str(detail_object[i])
return pd.DataFrame(detail_object.values(),
index=detail_object.keys(),
columns=["Value"])
session = rp.Session()
print "Session id %s"%session.uid
c = rp.Context('ssh')
c.user_id = ""
session.add_context(c)
print "Initializing Pilot Manager ..."
pmgr = rp.PilotManager(session=session)
print "Initializing Unit Manager ..."
umgr = rp.UnitManager (session=session,
scheduler=rp.SCHED_DIRECT_SUBMISSION)
pdesc = rp.ComputePilotDescription ()
pdesc.resource = "yarn.stampede" # NOTE: This is a "label", not a hostname
pdesc.runtime = 60 # minutes
pdesc.cores = 16
pdesc.cleanup = False
pdesc.project = '' #Include the Allocation here
pdesc.queue = 'development' #You can select a different queue if you want.
# submit the pilot.
print "Submitting Compute Pilot to Pilot Manager ..."
pilot = pmgr.submit_pilots(pdesc)
print "Registering Compute Pilot with Unit Manager ..."
umgr.add_pilots(pilot)
NUMBER_JOBS = 16
cudesc_list = []
for i in range(NUMBER_JOBS):
cudesc = rp.ComputeUnitDescription()
cudesc.environment = {'CU_NO': i}
cudesc.executable = "/bin/echo"
cudesc.arguments = ['I am CU number $CU_NO']
cudesc.cores = 1
cudesc_list.append(cudesc)
print "Submit Compute Units to Unit Manager ..."
cu_set = umgr.submit_units (cudesc_list)
print "Waiting for CUs to complete ..."
umgr.wait_units()
print "All CUs completed successfully!"
for unit in cu_set:
print "* CU %s, state %s, exit code: %s, stdout: %s" \
% (unit.uid, unit.state, unit.exit_code, unit.stdout)
session.close ()
del session
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Objectives
Steps
Step2: Desired visualization
Step 1
Step3: Step 1a
Step4: What colors are possible to use? Check out bokeh.palettes
Step5: Exercise
Step6: Bokeh tools
Step7: Synthesizing Bokeh Concepts (Classes)
Step8: Now add ad-hoc data
Step9: Exercise
|
<ASSISTANT_TASK:>
Python Code:
from bokeh.plotting import figure, output_file, show, output_notebook, vplot
import random
import numpy as np
import pandas as pd
output_notebook() # Use so see output in the Jupyter notebook
import bokeh
bokeh.__version__
from IPython.display import Image
Image(filename='biological_data.png')
df_bio = pd.read_csv("biological_data.csv")
df_bio.head()
plot = figure(plot_width=400, plot_height=400)
plot.circle(df_bio['rate'], df_bio['yield'])
plot.xaxis.axis_label = 'rate'
plot.yaxis.axis_label = 'yield'
show(plot)
# What are the possible colors
df_bio['line'].unique()
# Generate a plot with a different color for each line
colors = {'HA': 'red', 'HR': 'green', 'UA': 'blue', 'WT': 'purple'}
plot = figure(plot_width=700, plot_height=800)
plot.title.text = 'Phenotypes for evolutionary lines.'
for line in list(colors.keys()):
df = df_bio[df_bio.line == line]
color = colors[line]
plot.circle(df['rate'], df['yield'], color=color, legend=line)
plot.legend.location = "top_right"
show(plot)
import bokeh.palettes as palettes
print(palettes.__doc__)
#palettes.magma(4)
# Generate the colors dictionary
# Fill this in....
# Plot with the generated palette
# Fill this in ...
TOOLS = 'box_zoom,box_select,resize,reset'
plot = figure(plot_width=200, plot_height=200, title=None, tools=TOOLS)
plot.scatter(range(10), range(10))
show(plot)
from bokeh.models import HoverTool, BoxSelectTool
TOOLS = [HoverTool(), BoxSelectTool()]
plot = figure(plot_width=200, plot_height=200, title=None, tools=TOOLS)
show(plot)
from bokeh.plotting import figure, output_file, show
from bokeh.models import HoverTool, BoxSelectTool
output_file("toolbar.html")
TOOLS = [BoxSelectTool(), HoverTool()]
p = figure(plot_width=400, plot_height=400, title=None, tools=TOOLS)
p.circle([1, 2, 3, 4, 5], [2, 5, 8, 2, 7], size=10)
show(p)
from bokeh.plotting import figure, output_file, show, ColumnDataSource
from bokeh.models import HoverTool
output_file("toolbar.html")
hover = HoverTool(
tooltips=[
("index", "$index"),
("(x,y)", "(@x, @y)"),
("desc", "@desc"),
]
)
p = figure(plot_width=400, plot_height=400, tools=[hover],
title="Mouse over the dots")
source = ColumnDataSource(
data={
'x': [1, 2, 3, 4, 5],
'y': [2, 5, 8, 2, 7],
'desc': ['A', 'b', 'C', 'd', 'E'],
}
)
p.circle('x', 'y', size=20, source=source)
show(p)
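# Note on the tooltip field syntax used above: "$index", "$x", "$y" are Bokeh's special
# hover variables, while "@x", "@y", "@desc" refer to columns of the ColumnDataSource by name.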
Image(filename='BokehArchitecture.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'bnu', 'sandbox-2', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def sort_third(l: list):
l = list(l)
l[::3] = sorted(l[::3])
return l
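# Illustrative check (not part of the task statement): only indices 0, 3, 6, ...
# are sorted; everything else stays in place.
# sort_third([5, 6, 3, 4, 8, 9, 2]) -> [2, 6, 3, 4, 8, 9, 5]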
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Inputs
Step2: Metabolism
Step3: Plotting concentrations of compounds.
Step4: Plotting time series of compound concentrations.
|
<ASSISTANT_TASK:>
Python Code:
from jupyterthemes.stylefx import set_nb_theme
set_nb_theme('grade3')
import os
PREFIX = os.environ.get('PWD', '.')
# PREFIX = "../build/outputs"
import numpy
import pandas
import plotly.graph_objs as go
import plotly.figure_factory as ff
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
compounds = pandas.read_csv(os.path.join(PREFIX, "compounds.csv"))
num_compounds = compounds.shape[0]
print('[{}] compounds were loaded.'.format(num_compounds))
metabolism = pandas.read_csv(os.path.join(PREFIX, "metabolism.csv"))
print('[{}] reactions were loaded.'.format(metabolism.shape[0]))
timecourse = pandas.read_csv(os.path.join(PREFIX, "timecourse.csv"))
timecourse = timecourse.rename(columns={timecourse.columns[0]: "Time"})
concentrations = pandas.DataFrame(timecourse.values[: , : num_compounds+2], timecourse.index, timecourse.columns[ : num_compounds+2])
indices = [0] + list(range(num_compounds+2, timecourse.shape[1]))
fluxes = pandas.DataFrame(timecourse.values[: , indices], timecourse.index, timecourse.columns[indices])
def plot1(df, filename, indices=None, nsteps=10, rescaled=False, xlabel="", ylabel=""):
if indices is None:
(m, _) = df.shape
indices = range(0, m, m // nsteps)
if rescaled:
func = lambda idx: df.iloc[idx, 1: ] / df.iloc[0, 1: ]
else:
func = lambda idx: df.iloc[idx, 1: ]
ymin, ymax = +numpy.inf, -numpy.inf
for idx in indices:
        # func(idx) already applies the optional rescaling, so use it directly.
        ymin, ymax = min(ymin, min(func(idx))), max(ymax, max(func(idx)))
(ymin, ymax) = ymin - (ymax - ymin) / 15, ymax + (ymax - ymin) / 15
scatters = [
dict(
y=func(idx),
text=df.columns[1: ],
mode='markers',
marker=dict(
size='12', color=func(idx), colorscale='Viridis', showscale=True,
cmin=ymin, cmax=ymax,
line=dict(width=1)
),
visible=False
)
for idx in indices]
scatters[0]['visible'] = True
steps = []
for i, idx in enumerate(indices):
step = dict(
method='restyle',
label='{}'.format(df.iloc[idx, 0]),
args=['visible', [False] * len(scatters)],
)
step['args'][1][i] = True # Toggle i'th trace to "visible"
steps.append(step)
sliders = [
dict(
active=0,
currentvalue=dict(prefix="Time="),
pad=dict(t=50),
steps=steps
)
]
layout = dict(
hovermode= 'closest',
xaxis= dict(title=xlabel),
yaxis=dict(title=ylabel, range=(ymin, ymax)),
showlegend= False,
sliders=sliders,
height=600
)
fig = dict(data=scatters, layout=layout)
iplot(fig, filename=filename)
plot1(concentrations, "concentration_markers", nsteps=15, xlabel="Compound", ylabel="Concentration")
def plot2(df, filename, ngroups=20, lenlabel=30, rescaled=False, xlabel="", ylabel=""):
indices = list(range(1, df.shape[1]))
tick = len(indices) // (ngroups - 1)
if rescaled:
func = lambda idx: df.iloc[: , idx] / df.iloc[0, idx]
else:
func = lambda idx: df.iloc[: , idx]
ymin, ymax = +numpy.inf, -numpy.inf
for idx in indices:
ymin, ymax = min(ymin, min(func(idx))), max(ymax, max(func(idx)))
(ymin, ymax) = ymin - (ymax - ymin) / 15, ymax + (ymax - ymin) / 15
scatters = [
dict(
x=df.iloc[: , 0],
y=func(idx),
mode='lines',
name=df.columns[idx][: lenlabel],
visible=(idx < tick)
)
for idx in indices]
steps = []
for i in range(ngroups):
step = dict(
method='restyle',
label=i + 1,
args=['visible', [(i * tick <= j < (i + 1) * tick) for j in range(len(scatters))]],
)
if any(step['args'][1]):
steps.append(step)
sliders = [
dict(
active=0,
pad=dict(t=50),
steps=steps,
currentvalue=dict(prefix='Group')
)
]
layout = dict(
hovermode= 'closest',
xaxis= dict(title=xlabel),
yaxis=dict(title=ylabel, range=(ymin, ymax)),
# showlegend= False,
sliders=sliders,
height=600
)
fig = dict(data=scatters, layout=layout)
iplot(fig, filename=filename)
plot2(concentrations, "concentration_lines", xlabel="Time", ylabel="Concentration")
plot1(fluxes, "flux_markers", nsteps=15, rescaled=True, xlabel="Reaction", ylabel="Relative Flux")
plot2(fluxes, "flux_lines", rescaled=True, xlabel="Time", ylabel="Relative Flux")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data preprocessing
Step2: Validation curves
Step3: Max features
Step4: Minimum samples in leaf node
Step5: KS-test tuning
Step6: Minimum samples in leaf node
Step7: Maximum depth for various minimum samples in leaf node
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('/home/jbourbeau/cr-composition')
print('Added to PYTHONPATH')
from __future__ import division, print_function
from collections import defaultdict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn.apionly as sns
import scipy.stats as stats
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import validation_curve, GridSearchCV, cross_val_score, ParameterGrid, KFold, ShuffleSplit
import composition as comp
# Plotting-related
sns.set_palette('muted')
sns.set_color_codes()
color_dict = defaultdict()
for i, composition in enumerate(['light', 'heavy', 'total']):
color_dict[composition] = sns.color_palette('muted').as_hex()[i]
%matplotlib inline
X_train_sim, X_test_sim, y_train_sim, y_test_sim, le, energy_train_sim, energy_test_sim = comp.preprocess_sim(return_energy=True)
X_test_data, energy_test_data = comp.preprocess_data(return_energy=True)
# Alias the simulation arrays so the shorter names used in later cells are defined.
X_train, X_test, y_train, y_test = X_train_sim, X_test_sim, y_train_sim, y_test_sim
pipeline = comp.get_pipeline('RF')
param_range = np.arange(1, 16)
train_scores, test_scores = validation_curve(
estimator=pipeline,
X=X_train_sim,
y=y_train_sim,
param_name='classifier__max_depth',
param_range=param_range,
cv=5,
scoring='accuracy',
verbose=2,
n_jobs=20)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(param_range, train_mean,
color='b', marker='o',
markersize=5, label='training accuracy')
plt.fill_between(param_range,
train_mean + train_std,
train_mean - train_std,
alpha=0.15, color='b')
plt.plot(param_range, test_mean,
color='g', linestyle='None',
marker='s', markersize=5,
label='validation accuracy')
plt.fill_between(param_range,
test_mean + test_std,
test_mean - test_std,
alpha=0.15, color='g')
plt.grid()
plt.legend(loc='lower right')
plt.xlabel('Maximum depth')
plt.ylabel('Accuracy')
# plt.ylim([0.7, 0.8])
# plt.savefig('/home/jbourbeau/public_html/figures/composition/parameter-tuning/RF-validation_curve_min_samples_leaf.png', dpi=300)
plt.show()
pipeline = comp.get_pipeline('RF')
param_range = np.arange(1, X_train.shape[1])
train_scores, test_scores = validation_curve(
estimator=pipeline,
X=X_train,
y=y_train,
param_name='classifier__max_features',
param_range=param_range,
cv=10,
verbose=2,
n_jobs=20)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(param_range, train_mean,
color='b', marker='o',
markersize=5, label='training accuracy')
plt.fill_between(param_range, train_mean + train_std,
train_mean - train_std, alpha=0.15,
color='b')
plt.plot(param_range, test_mean,
color='g', linestyle='None',
marker='s', markersize=5,
label='validation accuracy')
plt.fill_between(param_range,
test_mean + test_std,
test_mean - test_std,
alpha=0.15, color='g')
plt.grid()
# plt.xscale('log')
plt.legend(loc='lower right')
plt.xlabel('Maximum features')
plt.ylabel('Accuracy')
# plt.ylim([0.8, 1.0])
# plt.savefig('/home/jbourbeau/public_html/figures/composition/parameter-tuning/RF-validation_curve_min_samples_leaf.png', dpi=300)
plt.show()
pipeline = comp.get_pipeline('RF')
param_range = np.arange(1, 400, 25)
train_scores, test_scores = validation_curve(
estimator=pipeline,
X=X_train,
y=y_train,
param_name='classifier__min_samples_leaf',
param_range=param_range,
cv=10,
verbose=2,
n_jobs=20)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(param_range, train_mean,
color='b', marker='o',
markersize=5, label='training accuracy')
plt.fill_between(param_range, train_mean + train_std,
train_mean - train_std, alpha=0.15,
color='b')
plt.plot(param_range, test_mean,
color='g', linestyle='None',
marker='s', markersize=5,
label='validation accuracy')
plt.fill_between(param_range,
test_mean + test_std,
test_mean - test_std,
alpha=0.15, color='g')
plt.grid()
# plt.xscale('log')
plt.legend()
plt.xlabel('Minimum samples in leaf node')
plt.ylabel('Accuracy')
# plt.ylim([0.8, 1.0])
# plt.savefig('/home/jbourbeau/public_html/figures/composition/parameter-tuning/RF-validation_curve_min_samples_leaf.png', dpi=300)
plt.show()
comp_list = ['light', 'heavy']
max_depth_list = np.arange(1, 16)
pval_comp = defaultdict(list)
ks_stat = defaultdict(list)
kf = KFold(n_splits=10)
fold_num = 0
for train_index, test_index in kf.split(X_train):
fold_num += 1
print('\r')
print('Fold {}: '.format(fold_num), end='')
X_train_fold, X_test_fold = X_train[train_index], X_train[test_index]
y_train_fold, y_test_fold = y_train[train_index], y_train[test_index]
pval_maxdepth = defaultdict(list)
print('max_depth = ', end='')
for max_depth in max_depth_list:
print('{}...'.format(max_depth), end='')
pipeline = comp.get_pipeline('RF')
pipeline.named_steps['classifier'].set_params(max_depth=max_depth)
pipeline.fit(X_train_fold, y_train_fold)
test_probs = pipeline.predict_proba(X_test_fold)
train_probs = pipeline.predict_proba(X_train_fold)
for class_ in pipeline.classes_:
pval_maxdepth[le.inverse_transform(class_)].append(stats.ks_2samp(test_probs[:, class_], train_probs[:, class_])[1])
for composition in comp_list:
pval_comp[composition].append(pval_maxdepth[composition])
pval_sys_err = {key: np.std(pval_comp[key], axis=0) for key in pval_comp}
pval = {key: np.mean(pval_comp[key], axis=0) for key in pval_comp}
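# Interpretation note: a small KS p-value means the train and test class-probability
# distributions differ, which this analysis treats as a sign of overfitting at that depth.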
comp_list = ['light']
fig, ax = plt.subplots()
for composition in comp_list:
upper_err = np.copy(pval_sys_err[composition])
upper_err = [val if ((pval[composition][i] + val) < 1) else 1-pval[composition][i] for i, val in enumerate(upper_err)]
lower_err = np.copy(pval_sys_err[composition])
lower_err = [val if ((pval[composition][i] - val) > 0) else pval[composition][i] for i, val in enumerate(lower_err)]
if composition == 'light':
ax.errorbar(max_depth_list -0.25/2, pval[composition],
yerr=[lower_err, upper_err],
marker='.', linestyle=':',
label=composition, alpha=0.75)
if composition == 'heavy':
ax.errorbar(max_depth_list + 0.25/2, pval[composition],
yerr=[lower_err, upper_err],
marker='.', linestyle=':',
label=composition, alpha=0.75)
plt.ylabel('KS-test p-value')
plt.xlabel('Maximum depth')
plt.ylim([-0.1, 1.1])
# plt.legend()
plt.grid()
plt.show()
pval
comp_list = ['light', 'heavy']  # note: the original read np.unique(df['MC_comp_class']), but df is not defined in this excerpt
min_samples_list = np.arange(1, 400, 25)
pval = defaultdict(list)
ks_stat = defaultdict(list)
print('min_samples_leaf = ', end='')
for min_samples_leaf in min_samples_list:
print('{}...'.format(min_samples_leaf), end='')
pipeline = comp.get_pipeline('RF')
params = {'max_depth': 4, 'min_samples_leaf': min_samples_leaf}
pipeline.named_steps['classifier'].set_params(**params)
pipeline.fit(X_train, y_train)
test_probs = pipeline.predict_proba(X_test)
train_probs = pipeline.predict_proba(X_train)
for class_ in pipeline.classes_:
pval[le.inverse_transform(class_)].append(stats.ks_2samp(test_probs[:, class_], train_probs[:, class_])[1])
fig, ax = plt.subplots()
for composition in pval:
ax.plot(min_samples_list, pval[composition], linestyle='-.', label=composition)
plt.ylabel('KS-test p-value')
plt.xlabel('Minimum samples leaf node')
plt.legend()
plt.grid()
plt.show()
# comp_list = np.unique(df['MC_comp_class'])
comp_list = ['light']
min_samples_list = [1, 25, 50, 75]
min_samples_list = [1, 100, 200, 300]
fig, axarr = plt.subplots(2, 2, sharex=True, sharey=True)
print('min_samples_leaf = ', end='')
for min_samples_leaf, ax in zip(min_samples_list, axarr.flatten()):
print('{}...'.format(min_samples_leaf), end='')
max_depth_list = np.arange(1, 16)
pval = defaultdict(list)
ks_stat = defaultdict(list)
for max_depth in max_depth_list:
pipeline = comp.get_pipeline('RF')
params = {'max_depth': max_depth, 'min_samples_leaf': min_samples_leaf}
pipeline.named_steps['classifier'].set_params(**params)
pipeline.fit(X_train, y_train)
test_probs = pipeline.predict_proba(X_test)
train_probs = pipeline.predict_proba(X_train)
for class_ in pipeline.classes_:
pval[le.inverse_transform(class_)].append(stats.ks_2samp(test_probs[:, class_], train_probs[:, class_])[1])
for composition in pval:
ax.plot(max_depth_list, pval[composition], linestyle='-.', label=composition)
ax.set_ylabel('KS-test p-value')
ax.set_xlabel('Maximum depth')
ax.set_title('min samples = {}'.format(min_samples_leaf))
ax.set_ylim([0, 0.5])
ax.legend()
ax.grid()
plt.tight_layout()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Top 10 shareholders
Step2: Retrieve basic information on Shanghai- and Shenzhen-listed companies. Attributes include:
Step3: Earnings reports (main table)
Step4: Profitability
Step5: Operating efficiency
Step6: Growth capability
Step7: Solvency
Step11: 3. CandleStick
|
<ASSISTANT_TASK:>
Python Code:
import tushare as ts
import pandas as pd
stock_selected='600699'
df1, data1 = ts.top10_holders(code=stock_selected, gdtype='1')
df1 = df1.sort_values('quarter', ascending=True)
df1.tail(10)
#qts = list(df1['quarter'])
#data = list(df1['props'])
#name = ts.get_realtime_quotes(stock_selected)['name'][0]
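# A possible follow-up sketch building on the commented lines above
# (assumes the 'quarter'/'props' columns returned by top10_holders):
# import matplotlib.pyplot as plt
# qts = list(df1['quarter'])
# props = list(df1['props'])
# plt.plot(range(len(qts)), props, marker='o')
# plt.xticks(range(len(qts)), qts, rotation=45)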
import tushare as ts
import pandas as pd
from IPython.display import HTML
# SPD Bank (浦发银行): top-10 tradable shareholders, 2016 Q3
df2, data2 = ts.top10_holders(code=stock_selected, year=2016, quarter=3, gdtype='1')
# Get the names of the top-10 tradable shareholders
top10name = str(list(data2['name']))
top10name
import tushare as ts
df=ts.get_stock_basics()
#data=df.loc('002281')
#print(data['gpr'])
#data=df.loc('002281')
#df.ix['002281']
#df.ix['002281']
#df.ix['002281']
#df.info()
df[df.name == u'四维图新']
df_out=df[(df.profit>20) &
(df.gpr > 25) &
(df.pe <120) &
(df.pe >0) &
(df.rev >0)][['name','industry','pe','profit','esp','rev','holders','gpr','npr']]
df_out.sort_values(by='npr',ascending=False, inplace = True)
df_out.rename(columns={'name':u'股票','industry':u'行业','pe':u'市盈率',
'profit':u'利润同比','esp':u'每股收益','rev':u'收入同比',
'holders':u'股东人数','gpr':u'毛利率','npr':u'净利率'})[:50]
import tushare as ts
df=ts.get_report_data(2016,4)
#df[df.code=='002405']
df
import tushare as ts
df_profit = ts.get_profit_data(2017,1)
#df_profit.info()
#df_profit[df_profit.code == '002405']
df_out=df_profit[(df_profit.roe>10) & (df_profit.gross_profit_rate > 25) & (df_profit.net_profits >0)]
df_out.sort_values(by='roe',ascending=False, inplace = True)
df_out[:50]
import tushare as ts
df_operation = ts.get_operation_data(2017,1)
df_out=df_operation[df_operation.currentasset_days<120]
df_out.sort_values(by='currentasset_days',ascending=False, inplace = True)
df_out[:50]
# -*- coding: UTF-8 -*-
import tushare as ts
df_growth = ts.get_growth_data(2017,1)
import numpy as np
import pandas as pd
df_out = df_growth[(df_growth.nprg >20) &
(df_growth.mbrg >20)]
df_out.sort_values(by= 'nprg', ascending = True, inplace=True)
writer = pd.ExcelWriter('growth.xlsx')
df_out.to_excel(writer,'growth')
writer.save()
#df_out.to_csv(".\growth.csv",encoding="utf_8_sig",dtype={'code':np.string})
df_out[:50]
import tushare as ts
df_cash = ts.get_cashflow_data(2016,4)
df_out = df_cash[(df_cash.cf_sales > 0)]
df_out.sort_values(by = 'cf_sales', ascending = True, inplace = True)
df_out[:50]
import tushare as ts
import pandas as pd
from IPython.display import HTML
# China Unicom (中国联通) forward-adjusted price data
#df = ts.get_k_data(stock_selected, start='2016-01-01', end='2016-12-02')
df = ts.get_k_data(stock_selected, start='2016-01-01')
datastr = ''
for idx in df.index:
rowstr = '[\'%s\',%s,%s,%s,%s]' % (df.ix[idx]['date'], df.ix[idx]['open'],
df.ix[idx]['close'], df.ix[idx]['low'],
df.ix[idx]['high'])
datastr += rowstr + ','
datastr = datastr[:-1]
# Get the stock name
name = ts.get_realtime_quotes(stock_selected)['name'][0]
datahead = """
<div id="chart" style="width:800px; height:600px;"></div>
<script>
require.config({ paths:{ echarts: '//cdn.bootcss.com/echarts/3.2.3/echarts.min', } });
require(['echarts'],function(ec){
var myChart = ec.init(document.getElementById('chart'));
"""
datavar = 'var data0 = splitData([%s]);' % datastr
funcstr = """
function splitData(rawData) {
var categoryData = [];
var values = []
for (var i = 0; i < rawData.length; i++) {
categoryData.push(rawData[i].splice(0, 1)[0]);
values.push(rawData[i])
}
return {
categoryData: categoryData,
values: values
};
}
function calculateMA(dayCount) {
var result = [];
for (var i = 0, len = data0.values.length; i < len; i++) {
if (i < dayCount) {
result.push('-');
continue;
}
var sum = 0;
for (var j = 0; j < dayCount; j++) {
sum += data0.values[i - j][1];
}
result.push((sum / dayCount).toFixed(2));
}
return result;
}
option = {
    title: {
"""
namestr = 'text: \'%s\',' %name
functail = """
left: 0
},
tooltip: {
trigger: 'axis',
axisPointer: {
type: 'line'
}
},
legend: {
data: ['日K', 'MA5', 'MA10', 'MA20', 'MA30']
},
grid: {
left: '10%',
right: '10%',
bottom: '15%'
},
xAxis: {
type: 'category',
data: data0.categoryData,
scale: true,
boundaryGap : false,
axisLine: {onZero: false},
splitLine: {show: false},
splitNumber: 20,
min: 'dataMin',
max: 'dataMax'
},
yAxis: {
scale: true,
splitArea: {
show: true
}
},
dataZoom: [
{
type: 'inside',
start: 50,
end: 100
},
{
show: true,
type: 'slider',
y: '90%',
start: 50,
end: 100
}
],
series: [
{
name: '日K',
type: 'candlestick',
data: data0.values,
markPoint: {
label: {
normal: {
formatter: function (param) {
return param != null ? Math.round(param.value) : '';
}
}
},
data: [
{
name: '标点',
coord: ['2013/5/31', 2300],
value: 2300,
itemStyle: {
normal: {color: 'rgb(41,60,85)'}
}
},
{
name: 'highest value',
type: 'max',
valueDim: 'highest'
},
{
name: 'lowest value',
type: 'min',
valueDim: 'lowest'
},
{
name: 'average value on close',
type: 'average',
valueDim: 'close'
}
],
tooltip: {
formatter: function (param) {
return param.name + '<br>' + (param.data.coord || '');
}
}
},
markLine: {
symbol: ['none', 'none'],
data: [
[
{
name: 'from lowest to highest',
type: 'min',
valueDim: 'lowest',
symbol: 'circle',
symbolSize: 10,
label: {
normal: {show: false},
emphasis: {show: false}
}
},
{
type: 'max',
valueDim: 'highest',
symbol: 'circle',
symbolSize: 10,
label: {
normal: {show: false},
emphasis: {show: false}
}
}
],
{
name: 'min line on close',
type: 'min',
valueDim: 'close'
},
{
name: 'max line on close',
type: 'max',
valueDim: 'close'
}
]
}
},
{
name: 'MA5',
type: 'line',
data: calculateMA(5),
smooth: true,
lineStyle: {
normal: {opacity: 0.5}
}
},
{
name: 'MA10',
type: 'line',
data: calculateMA(10),
smooth: true,
lineStyle: {
normal: {opacity: 0.5}
}
},
{
name: 'MA20',
type: 'line',
data: calculateMA(20),
smooth: true,
lineStyle: {
normal: {opacity: 0.5}
}
},
{
name: 'MA30',
type: 'line',
data: calculateMA(30),
smooth: true,
lineStyle: {
normal: {opacity: 0.5}
}
},
]
};
myChart.setOption(option);
});
</script>
"""
HTML(datahead + datavar + funcstr + namestr + functail)
import tushare as ts
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
stock_selected='002281'
df = ts.get_k_data(stock_selected, start='2016-01-01')
df.info()
#df['close'].plot(grid=True)
#df['42d']= np.round(pd.rolling_mean(df['close'],window=42),2)
#df['252d']= np.round(pd.rolling_mean(df['close'],window=252),2)
df['42d']= np.round(pd.Series.rolling(df['close'],window=42).mean(),2)
df['252d']= np.round(pd.Series.rolling(df['close'],window=252).mean(),2)
#df[['close','42d','252d']].tail(10)
df[['close','42d','252d']].plot(grid=True)
df['42-252']=df['42d']-df['252d']
#df['42-252'].tail(10)
SD=1
df['regime'] = np.where(df['42-252']>SD,1,0)
df['regime'] = np.where(df['42-252'] < -SD,-1,df['regime'])
#df['regime'].head(10)
df['regime'].tail(10)
#df['regime'].plot(lw=1.5)
#plt.ylim(-1.1, 1.1)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Viewing Epochs metadata
Step2: Viewing the metadata values for a given epoch and metadata variable is done with the .loc and .iloc accessors
Step3: Modifying the metadata
Step4: Selecting epochs using metadata queries
Step5: This capability uses the Pandas query string syntax under the hood
Step6: Note also that traditional epochs subselection by condition name still works;
Step7: One use of the Pandas query string approach is to select specific words for plotting
Step8: Notice that in this dataset, each "condition" (A.K.A., each word) occurs only once
Step9: Metadata can also be useful for sorting the epochs in an image plot. For example, sorting by word frequency
Step10: Although there's no obvious relationship in this case, such analyses may still be informative on other datasets
Step11: You can remove metadata from an ~mne.Epochs object by setting its metadata attribute to None
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import pandas as pd
import mne
kiloword_data_folder = mne.datasets.kiloword.data_path()
kiloword_data_file = os.path.join(kiloword_data_folder,
'kword_metadata-epo.fif')
epochs = mne.read_epochs(kiloword_data_file)
epochs.metadata
print('Name-based selection with .loc')
print(epochs.metadata.loc[2:4])
print('\nIndex-based selection with .iloc')
print(epochs.metadata.iloc[2:4])
epochs.metadata['NumberOfLetters'] = \
epochs.metadata['NumberOfLetters'].map(int)
epochs.metadata['HighComplexity'] = epochs.metadata['VisualComplexity'] > 65
epochs.metadata.head()
print(epochs['WORD.str.startswith("dis")'])
print(epochs['Concreteness > 6 and WordFrequency < 1'])
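# Another metadata query following the same pattern (illustrative):
print(epochs['NumberOfLetters > 8'])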
epochs['solenoid'].plot_psd()
words = ['typhoon', 'bungalow', 'colossus', 'drudgery', 'linguist', 'solenoid']
epochs['WORD in {}'.format(words)].plot(n_channels=29)
evokeds = dict()
query = 'NumberOfLetters == {}'
for n_letters in epochs.metadata['NumberOfLetters'].unique():
evokeds[str(n_letters)] = epochs[query.format(n_letters)].average()
mne.viz.plot_compare_evokeds(evokeds, cmap=('word length', 'viridis'),
picks='Pz')
sort_order = np.argsort(epochs.metadata['WordFrequency'])
epochs.plot_image(order=sort_order, picks='Pz')
new_metadata = pd.DataFrame(data=['foo'] * len(epochs), columns=['bar'],
index=range(len(epochs)))
epochs.metadata = new_metadata
epochs.metadata.head()
epochs.metadata = None
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Caveat
Step2: Second import statement (only run if you are not using jupyterlab)
Step3: Read the Data
Step4: The file itself is really large
Step5: Since all we care about are entries with URLs or domains in them, we'll open up the file and read it line by line, just picking those and then entering them into a data frame.
Step6: Initial Exploration
Step7: When doing research or data exploration, it really helps to know what the data looks like, so we take a small sample of it to look at.
Step8: Let's check whether we've got a domain column....
Step9: Manipulate The Data
Step10: The Meat
Step11: Let's create a list of domains to "watch"
Step14: Let's now calculate the MinHash value for all of the domains on the watch list.
Step15: and to create the watched domain dict, which is an object that simply contains all the watched domains as keys and the value is the MinHash value and the depth. The depth is defined simply as the number of parts within the domain name, e.g. www.mbl.is is 3 levels deep, whereas cnn.com is 2, etc.
Step16: Now that we've got the common domains, let's calculate the Jaccard distance (actually MinHash) and similarities (overlapping strings) for all of the domains in the history.
Step17: This can also be looked at visually.
Step18: So we see that someone is visiting a URL that looks very similar to our corp URL...
Step19: Timesketch Demo
Step20: Then we can take a look at what sketches are available, and let's pick one of them.
Step21: In this case we want to get the data from the Greendale incident.
Step22: Let's start to check whether or not there are saved views.
Step23: Let's look at what browser searches were made.
Step24: Let's look at some of the browser searches.
Step25: We can take a look at the frequency of search queries.
Step26: Problem with this is the fact that there are days with no search queries, that make the chart look a bit odd. Let's fill the values so that we have each day represented in the chart.
Step27: Now we can repeat what we did before, just this time with filled values.
Step28: We can also just look at saved aggregations...
Step29: They are more or less the same... we can take a look at one of them.
Step30: Or as a chart
Step31: Now we shall do something different. Let's look at all executions that are recorded from our prefetch parser. Once we've get these records we can then search for whether they appear somewhere in our web history.. that is whether there is a link between prefetch and browser history (this will miss ZIP files or other compressed files that get downloaded)
Step32: Now that we've got all the prefetch data, let's look at all executables and do a query for browser traffic that contains those names.
Step33: Let's combine the two data frames, that is the browser traffic and the prefetch data. We'll then sort it by dates.
Step34: First of all, let's just look at VPN.exe
Step35: And to examine all the hits
Step36: We can also extract the executable from the web history.
Step37: Now we can look at the most common applications
Step38: And then look at vpn.exe
Step39: Or we can do this as a chart...
Step40: OK.. these are all... but what about those executables that appear BOTH in a URL and Prefetch.
Step41: Now we can merge the two data frames.
Step42: And then we can do the same as we did above.
Step43: Search by label.
|
<ASSISTANT_TASK:>
Python Code:
import os
import re
import datetime
import getpass
import numpy as np
import pandas as pd
import altair as alt
import difflib
from timesketch_api_client import client as timesketch_client
#!pip install vega
from datasketch.minhash import MinHash
from six.moves import urllib_parse as urlparse
from timesketch.lib import similarity
from timesketch.lib.analyzers import utils
_ = alt.renderers.enable('notebook')
user_running_notebook = getpass.getuser()
DATA_LOCATION = '/Users/{0:s}/Documents/Presentation/data/studentpc1.jsonl'.format(user_running_notebook)
!du -hs $DATA_LOCATION
import json
data_entries = []
with open(DATA_LOCATION, 'r') as fh:
for line in fh:
parsed = json.loads(line)
if 'url' in parsed or 'domain' in parsed:
data_entries.append(parsed)
network_data = pd.DataFrame(data_entries)
del data_entries
network_data.shape
network_data.head(3)
if 'domain' in network_data.columns:
print('we do have a domain column')
else:
print('no such column exists....')
def get_domain(url):
domain_parsed = urlparse.urlparse(url)
domain_full = domain_parsed.netloc
domain, _, _ = domain_full.partition(':')
return domain
def get_tld(domain):
return '.'.join(domain.split('.')[-2:])
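# Quick illustration of the two helpers above (hypothetical inputs):
# get_domain('https://www.mbl.is:443/frettir') -> 'www.mbl.is'
# get_tld('www.mbl.is') -> 'mbl.is'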
network_data['domain'] = network_data.url.apply(get_domain)
network_data['tld'] = network_data.domain.apply(get_tld)
network_data.tld.value_counts()[:10].keys()
# We "bootstrap" the watch list with the known "corp" domains that we've got and we know about.
watch_list = ['greendale.xyz']
# Let's add the top domains to the list.
watch_list.extend(network_data.tld.value_counts()[:10].keys())
watch_list.extend(network_data.domain.value_counts()[:10].keys())
# Remove empty records...
watch_list = [x for x in watch_list if x]
watch_list
# This is just some code from the phishy analyzer, not really something we go through here.
# Added here to have the code to play with.
domain_scoring_threshold = 0.75
def _get_minhash_from_domain(domain):
    """Get the Minhash value from a domain name.

    This function takes a domain, removes the TLD extension
    from it and then creates a MinHash object from every
    remaining character in the domain.

    If a domain starts with www., it will be stripped of the
    domain before the Minhash is calculated.

    Args:
        domain: string with a full domain, eg. www.google.com

    Returns:
        A minhash (instance of datasketch.minhash.MinHash)
    """
domain_items = domain.split('.')
domain_part = '.'.join(domain_items[:-1])
minhash = MinHash(similarity.DEFAULT_PERMUTATIONS)
for char in domain_part:
minhash.update(char.encode('utf8'))
return minhash
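# Illustrative usage: character-level MinHash makes lookalike spoofs score a
# high estimated Jaccard similarity (hypothetical domains):
# m1 = _get_minhash_from_domain('greendale.xyz')
# m2 = _get_minhash_from_domain('greendaie.xyz')  # 'l' swapped for 'i'
# m1.jaccard(m2)  # roughly 0.75 for lookalikes like these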
def _get_similar_domains(domain, domain_dict):
    """Compare a domain to a list of domains and return similar domains.

    This function takes a domain and a dict object that contains
    as key domain names and value the calculated MinHash value for that
    domain as well as the domains depth (mbl.is is 2, foobar.evil.com would
    be 3). It will then strip www. if needed from the domain, and compare
    the Jaccard distance between all domains in the dict and the supplied
    domain (removing the TLD extension from all domains).

    If the Jaccard distance between the supplied domain and one or more of
    the domains in the domain dict is higher than the configured threshold
    the domain is further tested to see if there are overlapping substrings
    between the two domains. If there is a common substring that is longer
    than half the domain name and the Jaccard distance is above the
    threshold the domain is considered to be similar.

    Args:
        domain: string with a full domain, eg. www.google.com
        domain_dict: dict with domain names (keys) and MinHash objects
            (values) for all domains to compare against.

    Returns:
        a list of tuples (score, similar_domain_name) with the names of
        the similar domains as well as the Jaccard distance between
        the supplied domain and the matching one.
    """
domain = utils.strip_www_from_domain(domain)
similar = []
if '.' not in domain:
return similar
if domain in domain_dict:
return similar
if any(domain.endswith('.{0:s}'.format(x)) for x in domain_dict):
return similar
# We want to get rid of the TLD extension of the domain.
# This is only used in the substring match in case the Jaccard
# distance is above the threshold.
domain_items = domain.split('.')
domain_depth = len(domain_items)
domain_part = '.'.join(domain_items[:-1])
minhashes = {}
for index in range(0, domain_depth - 1):
minhashes[domain_depth - index] = _get_minhash_from_domain(
'.'.join(domain_items[index:]))
for watched_domain, watched_item in iter(domain_dict.items()):
watched_hash = watched_item.get('hash')
watched_depth = watched_item.get('depth')
minhash = minhashes.get(watched_depth)
if not minhash:
# The supplied domains length does not match this watched
# domain.
continue
score = watched_hash.jaccard(minhash)
if score < domain_scoring_threshold:
continue
watched_domain_items = watched_domain.split('.')
watched_domain_part = '.'.join(watched_domain_items[:-1])
# Check if there are also any overlapping strings.
sequence = difflib.SequenceMatcher(
None, domain_part, watched_domain_part)
match = sequence.find_longest_match(
0, len(domain_part), 0, len(watched_domain_part))
# We want to have at least half of the domain matching.
# TODO: This can be improved, this is a value and part that
# needs or can be tweaked. Perhaps move this to a config option
# that is the min length of strings.
match_size = min(
int(len(domain_part)/2), int(len(watched_domain_part)/2))
if match.size < match_size:
continue
similar.append((watched_domain, score))
return similar
watched_domains = {}
for domain in watch_list:
minhash = _get_minhash_from_domain(domain)
watched_domains[domain] = {
'hash': minhash,
'depth': len(domain.split('.'))
}
entries = []
for domain in network_data.domain.unique():
similar_domains = _get_similar_domains(domain, watched_domains)
if not similar_domains:
continue
print('Domain: {0:s} does have similar domains discovered:'.format(domain))
for similarities in similar_domains:
s_domain, s_score = similarities
print(' [{0:s}] - {1:0.2f}%'.format(s_domain, s_score * 100))
entry = {'domain': domain, 'watched_domain': s_domain, 'score': s_score}
entries.append(entry)
print('---')
similar_domains = pd.DataFrame(entries)
similar_domains.sort_values('score', ascending=False)
similar_domains['percentage'] = similar_domains.score * 100
alt.Chart(similar_domains).mark_point().encode(
y='domain', x='watched_domain', size='percentage',
color='watched_domain', fill='watched_domain',
tooltip=['watched_domain','domain', 'percentage'],
).properties(
width=600, height=400,
title='Similar Domains'
)
client = timesketch_client.TimesketchApi('https://demo.timesketch.org', 'demo', 'demo')
for index, sketch in enumerate(client.list_sketches()):
print('[{0:d}] ID: {1:d} - {2:s} <{3:s}>'.format(index, sketch.id, sketch.name, sketch.description))
sketch = client.get_sketch(238)
lines = []
for view in sketch.list_views():
view_dict = {
'id': view.id,
'name': view.name}
lines.append(view_dict)
pd.DataFrame(lines)
view = sketch.get_view(view_id=2010)
results = sketch.explore(
view=view, as_pandas=True,
return_fields='datetime,message,timestamp_desc,source,source_short,label,tag,tags,url,domain,search_string')
results.search_string.value_counts()
results['date'] = pd.to_datetime(results.datetime)
results['day'] = results.date.dt.strftime('%Y%m%d')
r_group = results[['day', 'datetime']].groupby(by='day', as_index=False)
r_count = r_group.count()
r_count['count'] = r_count['datetime']
del r_count['datetime']
alt.Chart(r_count, width=500, height=300).mark_line(point=True).encode(
x='day',
y='count',
tooltip=['day', 'count'],
).properties(
title='Search Queries per day'
)
# Let's skip the first day, since that was in the year 2000....
r = r_count[1:]
# We need to find out the first and last day.
r_first = r.day.values[0]
r_end = r.day.values[-1]
# Let's create a timestamp from the first day.
year = r_first[0:4]
month = r_first[4:6]
day = r_first[6:]
first_day = datetime.datetime(year=int(year), month=int(month), day=int(day))
# Now we can create a list of all dates between first and last date.
all_days = []
cur_day = first_day
while True:
day_string = cur_day.strftime('%Y%m%d')
if day_string == r_end:
break
all_days.append(day_string)
cur_day = cur_day + datetime.timedelta(days=1)
# Let's create sets, one for all days, the second for the current days.
cur_days = set([str(x) for x in r.day.values])
all_days = set(all_days)
# Now we can easily find a list of all missing dates.
missing_days = all_days.difference(cur_days)
# Let's create a data frame that contains just the missing dates, with a count of zero.
lines = []
for day in missing_days:
line = {'day': str(day), 'count': 0}
lines.append(line)
df = pd.DataFrame(lines)
r_filled = r.append(df, sort=True)
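# Note: an equivalent, more idiomatic fill (a sketch assuming the same
# YYYYMMDD day strings built above):
# full_idx = pd.Index(sorted(all_days | cur_days), name='day')
# r_filled = r.set_index('day').reindex(full_idx, fill_value=0).reset_index()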
alt.Chart(r_filled, width=900, height=300).mark_line(point=True).encode(
x=alt.X('day', axis=alt.Axis(title='Day of Query')),
y='count',
tooltip=['day', 'count'],
).properties(
title='Search Queries per day'
)
aggregations = []
for index, agg in enumerate(sketch.list_aggregations()):
print('[{0:d}] - {1:s}, {2:s} <{3:s}> -> {4:s}'.format(index, agg.name, agg.aggregator_name, agg.type, agg.chart_type))
aggregations.append(agg)
aggregations[3].table
aggregations[3].chart
data = sketch.explore(
query_string='parser:"prefetch"', as_pandas=True,
return_fields='datetime,timestamp_desc,source,source_short,message,executable')
data.shape
browser_traffic = pd.DataFrame()
for executable in data.executable.unique():
if not executable:
continue
if executable is np.nan:
continue
if not executable.lower().endswith('.exe'):
continue
exec_data = sketch.explore(
query_string='url:"*{0:s}*"'.format(executable.lower()),
return_fields='datetime,timestamp_desc,source,source_short,message,domain,url',
as_pandas=True)
browser_traffic = pd.concat([browser_traffic, exec_data])
combined = pd.concat([browser_traffic, data], sort=False)
c_sorted = combined.sort_values(by='datetime')
c_sorted['date'] = pd.to_datetime(c_sorted.datetime)
c_sorted[c_sorted.message.str.contains('vpn.exe', case=False)][['date', 'timestamp_desc', 'message', 'url', 'executable', 'domain']][2:]
c_sorted[['date', 'timestamp_desc', 'message', 'url', 'executable']]
import six
def check_row(row):
executable = row.executable
if isinstance(executable, six.text_type):
if not executable:
return np.nan
return executable.lower()
url = row.url
if not isinstance(url, six.text_type):
return np.nan
_, _, last_part = url.rpartition('/')
if '&' in last_part:
last_part, _, _ = last_part.partition('&')
if '#' in last_part:
last_part, _, _ = last_part.partition('#')
if not last_part:
return np.nan
return last_part
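# Illustrative behaviour: rows with a prefetch executable return it lowercased;
# URL rows return the last path segment with any '&'/'#' suffix stripped,
# e.g. '.../download/VPN.exe&token=1' -> 'VPN.exe' (hypothetical URL).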
c_sorted['new_exec'] = c_sorted.apply(check_row, axis=1)
c_sorted['day'] = c_sorted.date.dt.strftime('%Y%m%d')
c_sorted_data = c_sorted[~c_sorted.new_exec.isna()]
c_sorted_data.new_exec.value_counts()[:10]
c_sorted_data[c_sorted_data.new_exec == 'vpn.exe']
c_group = c_sorted[['day', 'date', 'new_exec']].groupby(by=['new_exec','day'], as_index=False)
c_count = c_group.count()
c_count['count'] = c_count.date
c_count['executable'] = c_count['new_exec']
del c_count['date']
alt.Chart(c_count).mark_point(filled=True).encode(
x='day',
y='executable',
size='count',
)
browser_traffic.head(10)
def get_exec(url):
items = [x.lower() for x in url.split('/')]
executable = ''
for item in items:
if not '.exe' in item:
continue
executable = item
if '#' in executable:
executable, _, _ = executable.partition('#')
if '&' in executable:
executable, _, _ = executable.partition('&')
if '\\' in executable:
_, _, executable = executable.rpartition('\\')
return executable
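# Illustrative behaviour (hypothetical URL):
# get_exec('http://files.example/dl/VPN.exe#mirror') -> 'vpn.exe'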
browser_traffic['new_exec'] = browser_traffic.url.apply(get_exec)
def get_exec(exec_string):
if not isinstance(exec_string, six.text_type):
return exec_string
return exec_string.lower()
data['new_exec'] = data.executable.apply(get_exec)
merged_df = data.merge(browser_traffic, on='new_exec')
merged_df['date'] = pd.to_datetime(merged_df['datetime_x'])
merged_df['day'] = merged_df.date.dt.strftime('%Y%m%d')
m_group = merged_df[['day', 'date', 'new_exec']].groupby(by=['new_exec','day'], as_index=False)
m_count = m_group.count()
m_count['count'] = m_count.date
m_count['executable'] = m_count['new_exec']
del m_count['date']
alt.Chart(m_count, width=300, height=200).mark_point(filled=True).encode(
x='day',
y='executable',
size='count',
color='executable'
).properties(
title='Executables that appear both in Prefetch and Internet history'
)
sketch.search_by_label('__ts_star', as_pandas=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Classification Project
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Your code goes here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Estimate the intercept and slope.
Step2: Show a scatter plot of the data with the fitted line.
Step3: Make the same plot, but apply the inverse transform to show weight on a linear (not log) scale.
Step4: Plot percentiles of the residuals.
Step5: Compute the correlation.
Step6: Compute the coefficient of determination.
Step7: Confirm that $R^2 = \rho^2$.
Step8: Compute Std(ys), which is the RMSE of predictions that don't use height.
Step9: Compute Std(res), the RMSE of predictions that do use height.
Step10: How much does height information reduce RMSE? About 15%.
Step11: Use resampling to compute sampling distributions for the intercept and slope.
Step12: Plot the sampling distribution of the slope.
Step13: Compute the p-value of the slope.
Step14: Compute the 90% confidence interval of the slope.
Step15: Compute the mean of the sampling distribution.
Step16: Compute the standard deviation of the sampling distribution; this is the standard error.
Step18: Resample using sampling weights.
Step19: Summarize the sampling distribution.
Step20: Resample rows without weights and summarize the results.
Step21: Resample rows with weights. When sampling weights are taken into account, the estimated mean height is almost 2 cm taller, and the difference is much bigger than the sampling error.
|
<ASSISTANT_TASK:>
Python Code:
import brfss
import numpy as np
%matplotlib inline
df = brfss.ReadBrfss(nrows=None)
df = df.dropna(subset=['htm3', 'wtkg2'])
heights, weights = df.htm3, df.wtkg2
weights = np.log10(weights)
import thinkstats2
inter, slope = thinkstats2.LeastSquares(heights, weights)
inter, slope
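# Note: the fitted model is log10(weight) = inter + slope * height, so each
# extra centimeter of height multiplies predicted weight by 10**slope.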
import thinkplot
thinkplot.Scatter(heights, weights, alpha=0.01)
fxs, fys = thinkstats2.FitLine(heights, inter, slope)
thinkplot.Plot(fxs, fys)
thinkplot.Config(xlabel='height (cm)', ylabel='log10 weight (kg)', legend=False)
thinkplot.Scatter(heights, 10**weights, alpha=0.01)
fxs, fys = thinkstats2.FitLine(heights, inter, slope)
thinkplot.Plot(fxs, 10**fys)
thinkplot.Config(xlabel='height (cm)', ylabel='weight (kg)', legend=False)
res = thinkstats2.Residuals(heights, weights, inter, slope)
df['residual'] = res
bins = np.arange(130, 210, 5)
indices = np.digitize(df.htm3, bins)
groups = df.groupby(indices)
means = [group.htm3.mean() for i, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.residual) for i, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
ys = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(means, ys, label=label)
thinkplot.Config(xlabel='height (cm)', ylabel='residual weight (kg)', legend=False)
rho = thinkstats2.Corr(heights, weights)
rho
r2 = thinkstats2.CoefDetermination(weights, res)
r2
rho**2 - r2
std_ys = thinkstats2.Std(weights)
std_ys
std_res = thinkstats2.Std(res)
std_res
1 - std_res / std_ys
t = []
for _ in range(100):
sample = thinkstats2.ResampleRows(df)
estimates = thinkstats2.LeastSquares(sample.htm3, np.log10(sample.wtkg2))
t.append(estimates)
inters, slopes = zip(*t)
cdf = thinkstats2.Cdf(slopes)
thinkplot.Cdf(cdf)
thinkplot.Show(legend=False)
pvalue = cdf[0]
pvalue
ci = cdf.Percentile(5), cdf.Percentile(95)
ci
mean = thinkstats2.Mean(slopes)
mean
stderr = thinkstats2.Std(slopes)
stderr
def ResampleRowsWeighted(df, column='finalwt'):
    """Resamples a DataFrame using probabilities proportional to given column.

    df: DataFrame
    column: string column name to use as weights

    returns: DataFrame
    """
weights = df[column]
cdf = thinkstats2.Cdf(dict(weights))
indices = cdf.Sample(len(weights))
sample = df.loc[indices]
return sample
def Summarize(estimates):
mean = thinkstats2.Mean(estimates)
stderr = thinkstats2.Std(estimates)
cdf = thinkstats2.Cdf(estimates)
ci = cdf.Percentile(5), cdf.Percentile(95)
print('mean', mean)
print('stderr', stderr)
print('ci', ci)
estimates_unweighted = [thinkstats2.ResampleRows(df).htm3.mean() for _ in range(100)]
Summarize(estimates_unweighted)
estimates_weighted = [ResampleRowsWeighted(df).htm3.mean() for _ in range(100)]
Summarize(estimates_weighted)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exploring the data
Step2: Each image is 28 x 28 pixels and represents a digit from 0 to 9. These images are black and white, so each pixel is a value from 0 (white) to 255 (black). Raw numbers can be hard to interpret sometimes, so we can plot the values to see the handwritten digit as an image.
Step3: Define the model
Step5: Write Input Functions
Step6: Time to train the model! The original MNIST linear classifier had an error rate of 12%. Let's use that to sanity check that our model is learning.
Step7: Evaluating Predictions
Step8: It's understandable why the poor computer would have some trouble. Some of these images are difficult for even humans to read. In fact, we can see what the computer thinks each digit looks like.
|
<ASSISTANT_TASK:>
Python Code:
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras.layers import Dense, Flatten, Softmax
print(tf.__version__)
mnist = tf.keras.datasets.mnist.load_data()
(x_train, y_train), (x_test, y_test) = mnist
HEIGHT, WIDTH = x_train[0].shape
NCLASSES = tf.size(tf.unique(y_train).y)
print("Image height x width is", HEIGHT, "x", WIDTH)
tf.print("There are", NCLASSES, "classes")
IMGNO = 12
# Uncomment to see raw numerical values.
# print(x_test[IMGNO])
plt.imshow(x_test[IMGNO].reshape(HEIGHT, WIDTH))
print("The label for image number", IMGNO, "is", y_test[IMGNO])
def linear_model():
# TODO: Build a sequential model and compile it.
return model
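# One possible completion of the TODO above (a sketch, not the only valid answer):
# model = Sequential([
#     Flatten(input_shape=(HEIGHT, WIDTH)),
#     Dense(NCLASSES),
#     Softmax(),
# ])
# model.compile(optimizer="adam",
#               loss="categorical_crossentropy",
#               metrics=["accuracy"])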
BUFFER_SIZE = 5000
BATCH_SIZE = 100
def scale(image, label):
    # TODO
    pass  # placeholder so the stub parses; fill in the TODO above
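# A minimal sketch for `scale`, assuming pixels should be cast to float and
# normalized to [0, 1]:
# def scale(image, label):
#     image = tf.cast(image, tf.float32) / 255.0
#     return image, label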
def load_dataset(training=True):
    """Loads MNIST dataset into a tf.data.Dataset"""
(x_train, y_train), (x_test, y_test) = mnist
x = x_train if training else x_test
y = y_train if training else y_test
# TODO: a) one-hot encode labels, apply `scale` function, and create dataset.
# One-hot encode the classes
if training:
        # TODO
        pass  # placeholder so the stub parses
return dataset
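# A possible body for the TODOs above (illustrative sketch):
# y = tf.one_hot(y, NCLASSES)
# dataset = tf.data.Dataset.from_tensor_slices((x, y)).map(scale)
# if training:
#     dataset = dataset.shuffle(BUFFER_SIZE).repeat()
# dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
# return dataset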
def create_shape_test(training):
dataset = load_dataset(training=training)
data_iter = dataset.__iter__()
(images, labels) = data_iter.get_next()
expected_image_shape = (BATCH_SIZE, HEIGHT, WIDTH)
expected_label_ndim = 2
assert images.shape == expected_image_shape
assert labels.numpy().ndim == expected_label_ndim
test_name = "training" if training else "eval"
print("Test for", test_name, "passed!")
create_shape_test(True)
create_shape_test(False)
NUM_EPOCHS = 10
STEPS_PER_EPOCH = 100
model = linear_model()
train_data = load_dataset()
validation_data = load_dataset(training=False)
OUTDIR = "mnist_linear/"
checkpoint_callback = ModelCheckpoint(OUTDIR, save_weights_only=True, verbose=1)
tensorboard_callback = TensorBoard(log_dir=OUTDIR)
history = model.fit(
# TODO: specify training/eval data, # epochs, steps per epoch.
verbose=2,
callbacks=[checkpoint_callback, tensorboard_callback],
)
BENCHMARK_ERROR = 0.12
BENCHMARK_ACCURACY = 1 - BENCHMARK_ERROR
accuracy = history.history["accuracy"]
val_accuracy = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
assert accuracy[-1] > BENCHMARK_ACCURACY
assert val_accuracy[-1] > BENCHMARK_ACCURACY
print("Test to beat benchmark accuracy passed!")
assert accuracy[0] < accuracy[1]
assert accuracy[1] < accuracy[-1]
assert val_accuracy[0] < val_accuracy[1]
assert val_accuracy[1] < val_accuracy[-1]
print("Test model accuracy is improving passed!")
assert loss[0] > loss[1]
assert loss[1] > loss[-1]
assert val_loss[0] > val_loss[1]
assert val_loss[1] > val_loss[-1]
print("Test loss is decreasing passed!")
image_numbers = range(0, 10, 1) # Change me, please.
def load_prediction_dataset():
dataset = (x_test[image_numbers], y_test[image_numbers])
dataset = tf.data.Dataset.from_tensor_slices(dataset)
dataset = dataset.map(scale).batch(len(image_numbers))
return dataset
predicted_results = model.predict(load_prediction_dataset())
for index, prediction in enumerate(predicted_results):
predicted_value = np.argmax(prediction)
actual_value = y_test[image_numbers[index]]
if actual_value != predicted_value:
print("image number: " + str(image_numbers[index]))
print("the prediction was " + str(predicted_value))
print("the actual label is " + str(actual_value))
print("")
bad_image_number = 8
plt.imshow(x_test[bad_image_number].reshape(HEIGHT, WIDTH));
DIGIT = 0 # Change me to be an integer from 0 to 9.
LAYER = 1 # Layer 0 flattens image, so no weights
WEIGHT_TYPE = 0 # 0 for variable weights, 1 for biases
dense_layer_weights = model.layers[LAYER].get_weights()
digit_weights = dense_layer_weights[WEIGHT_TYPE][:, DIGIT]
plt.imshow(digit_weights.reshape((HEIGHT, WIDTH)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def reverse_delete(s,c):
s = ''.join([char for char in s if char not in c])
return (s,s[::-1] == s)
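# Illustrative behaviour:
# reverse_delete("abcde", "ae") -> ('bcd', False)
# reverse_delete("abcdedcba", "b") -> ('acdedca', True)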
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make the AGN catalogs
Step2: Next we add in the lens properties to go with each of the images.
Step3: Make the SNe catalogs
Step4: Next we add in the lens properties to go with each of the images.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from astropy.io import fits
import numpy as np
from desc.sims.GCRCatSimInterface import InstanceCatalogWriter
from lsst.sims.utils import SpecMap
import matplotlib.pyplot as plt
from lsst.utils import getPackageDir
from lsst.sims.photUtils import Sed, BandpassDict, Bandpass
from lsst.sims.catUtils.matchSED import matchBase
import os
%matplotlib inline
base_columns = ['prefix', 'uniqueId', 'raPhoSim', 'decPhoSim',
'phosimMagNorm', 'sedFilepath', 'redshift',
'shear1', 'shear2', 'kappa', 'raOffset', 'decOffset',
'spatialmodel']
df_galaxy = pd.read_csv(os.path.join(os.environ['SCRATCH'],
'bulge_gal_cat_197356.txt.gz'),
delimiter=' ', header=None,
names=base_columns+['majorAxis', 'minorAxis',
'positionAngle', 'sindex',
'internalExtinctionModel',
'internalAv', 'internalRv',
'galacticExtinctionModel',
'galacticAv', 'galacticRv'])
df_disk = pd.read_csv(os.path.join(os.environ['SCRATCH'],
'disk_gal_cat_197356.txt.gz'),
delimiter=' ', header=None,
names=base_columns+['majorAxis', 'minorAxis',
'positionAngle', 'sindex',
'internalExtinctionModel',
'internalAv', 'internalRv',
'galacticExtinctionModel',
'galacticAv', 'galacticRv'])
df_agn = pd.read_csv(os.path.join(os.environ['SCRATCH'],
'agn_gal_cat_197356.txt.gz'),
delimiter=' ', header=None,
names=base_columns+['internalExtinctionModel',
'galacticExtinctionModel',
'galacticAv', 'galacticRv'])
df_agn['galaxy_id'] = np.right_shift(df_agn['uniqueId'], 10)
df_agn.head()
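# Note on the ID scheme: the low 10 bits of uniqueId encode an object-type
# offset, so right-shifting by 10 recovers the parent galaxy_id (mirrored by
# the left_shift(base_id, 10) + offset used when image IDs are built below).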
df_galaxy['galaxy_id'] = np.right_shift(df_galaxy['uniqueId'], 10)
df_galaxy.head()
df_disk['galaxy_id'] = np.right_shift(df_disk['uniqueId'], 10)
df_disk.head()
catalog_version = 'cosmoDC2_v1.1.4'
hdulist = fits.open('../../data/%s_matched_AGN.fits' % catalog_version)
twinkles_lenses = hdulist[1].data
agn_cache_df = pd.read_csv('../../data/%s_agn_cache.csv' % catalog_version)
#TwinklesId can range from 0 - 1099
ra_images = []
dec_images = []
num_images = []
twinkles_system_images = []
unique_id_lens = []
unique_id_bulge = []
unique_id_disk = []
bulge_index = []
disk_index = []
disk_valid = []
i = 0
for twinkles_system in range(len(twinkles_lenses)):
if twinkles_system % 100 == 0:
print(twinkles_system)
cache_info = agn_cache_df.query('twinkles_system == %i' % twinkles_system)
gal_id = cache_info['galtileid'].values[0]
lens_info = twinkles_lenses[np.where(twinkles_lenses['twinklesId'] == twinkles_system)]
bulge_info = df_galaxy.query('galaxy_id == %i' % gal_id)
disk_info = df_disk.query('galaxy_id == %i' % gal_id)
#print(bulge_info, disk_info)
for lens_image in range(lens_info[0]['NIMG']):
ra_shift = np.radians(lens_info[0]['XIMG'][lens_image] / 3600.) / np.cos(np.radians(bulge_info['decPhoSim']))
dec_shift = np.radians(lens_info[0]['YIMG'][lens_image] / 3600.)
image_ra = np.radians(bulge_info['raPhoSim']) + ra_shift
        image_dec = np.radians(bulge_info['decPhoSim']) + dec_shift  # dec_shift is already in radians
ra_images.append(np.degrees(image_ra).values[0])
dec_images.append(np.degrees(image_dec).values[0])
num_images.append(lens_image)
unique_id_lens.append(bulge_info['uniqueId'].values[0])
twinkles_system_images.append(twinkles_system)
base_id = (gal_id + int(1.5e10))*10000 + twinkles_system*4 + lens_image
unique_id_bulge.append(np.left_shift(base_id, 10) + 97)
unique_id_disk.append(np.left_shift(base_id, 10) + 107)
bulge_index.append(bulge_info.index[0])
if len(disk_info) == 0:
print(twinkles_system)
else:
disk_index.append(disk_info.index[0])
disk_valid.append(i)
i += 1
host_bulge = df_galaxy.iloc[bulge_index]
host_bulge = host_bulge.reset_index(drop=True)
host_bulge['twinkles_system'] = twinkles_system_images
host_bulge['image_number'] = num_images
host_bulge['uniqueId_lens'] = unique_id_lens
host_bulge['uniqueId'] = unique_id_bulge
host_bulge['raPhoSim_lens'] = host_bulge['raPhoSim']
host_bulge['decPhoSim_lens'] = host_bulge['decPhoSim']
host_bulge['raPhoSim'] = ra_images
host_bulge['decPhoSim'] = dec_images
host_bulge.head()
host_disk = df_disk.iloc[disk_index]
host_disk = host_disk.reset_index(drop=True)
host_disk['twinkles_system'] = np.array(twinkles_system_images)[disk_valid]
host_disk['image_number'] = np.array(num_images)[disk_valid]
host_disk['uniqueId_lens'] = np.array(unique_id_lens)[disk_valid]
host_disk['uniqueId'] = np.array(unique_id_disk)[disk_valid]
host_disk['raPhoSim_lens'] = host_disk['raPhoSim']
host_disk['decPhoSim_lens'] = host_disk['decPhoSim']
host_disk['raPhoSim'] = np.array(ra_images)[disk_valid]
host_disk['decPhoSim'] = np.array(dec_images)[disk_valid]
host_disk.head()
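# disk_valid masks out lensed systems whose host galaxy has no disk
# component (the len(disk_info) == 0 branch above), so the disk table can be
# shorter than the bulge table while staying aligned with the image arrays.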
host_bulge.to_csv('../../data/%s_bulge_agn_host.csv' % catalog_version)
host_disk.to_csv('../../data/%s_disk_agn_host.csv' % catalog_version)
sne_df = pd.read_csv('../../data/%s_sne_cat.csv' % catalog_version)
sne_cache_df = pd.read_csv('../../data/%s_sne_cache.csv' % catalog_version)
sne_df.head()
#TwinklesId ranges from 1100 - 1100+number of cached SNe
ra_images = []
dec_images = []
num_images = []
twinkles_system_images = []
unique_id_lens = []
unique_id_bulge = []
unique_id_disk = []
bulge_index = []
disk_index = []
disk_valid = []
i = 0
for twinkles_system in range(1100, 1100+len(sne_cache_df)):
if twinkles_system % 100 == 0:
print(twinkles_system)
cache_info = sne_cache_df.query('twinkles_system == %i' % twinkles_system)
gal_id = cache_info['galtileid'].values[0]
lens_info = sne_df.iloc[np.where(sne_df['twinkles_sysno'] == twinkles_system)]
bulge_info = df_galaxy.query('galaxy_id == %i' % gal_id)
disk_info = df_disk.query('galaxy_id == %i' % gal_id)
#print(bulge_info, disk_info)
for lens_image in lens_info['imno'].values:
ra_shift = np.radians(lens_info['x'].iloc[lens_image] / 3600.) / np.cos(np.radians(bulge_info['decPhoSim']))
dec_shift = np.radians(lens_info['y'].iloc[lens_image] / 3600.)
image_ra = np.radians(bulge_info['raPhoSim']) + ra_shift
image_dec = np.radians(bulge_info['decPhoSim']) + dec_shift # convert degrees to radians before adding dec_shift, which is already in radians
ra_images.append(np.degrees(image_ra).values[0])
dec_images.append(np.degrees(image_dec).values[0])
num_images.append(lens_image)
unique_id_lens.append(bulge_info['uniqueId'].values[0])
twinkles_system_images.append(twinkles_system)
base_id = (gal_id + int(1.5e10))*10000 + twinkles_system*4 + lens_image
unique_id_bulge.append(np.left_shift(base_id, 10) + 97)
unique_id_disk.append(np.left_shift(base_id, 10) + 107)
bulge_index.append(bulge_info.index[0])
if len(disk_info) == 0:
print(twinkles_system)
else:
disk_index.append(disk_info.index[0])
disk_valid.append(i)
i += 1
host_bulge = df_galaxy.iloc[bulge_index]
host_bulge = host_bulge.reset_index(drop=True)
host_bulge['twinkles_system'] = twinkles_system_images
host_bulge['image_number'] = num_images
host_bulge['uniqueId_lens'] = unique_id_lens
host_bulge['uniqueId'] = unique_id_bulge
host_bulge['raPhoSim_lens'] = host_bulge['raPhoSim']
host_bulge['decPhoSim_lens'] = host_bulge['decPhoSim']
host_bulge['raPhoSim'] = ra_images
host_bulge['decPhoSim'] = dec_images
host_bulge.head()
host_disk = df_disk.iloc[disk_index]
host_disk = host_disk.reset_index(drop=True)
host_disk['twinkles_system'] = np.array(twinkles_system_images)[disk_valid]
host_disk['image_number'] = np.array(num_images)[disk_valid]
host_disk['uniqueId_lens'] = np.array(unique_id_lens)[disk_valid]
host_disk['uniqueId'] = np.array(unique_id_disk)[disk_valid]
host_disk['raPhoSim_lens'] = host_disk['raPhoSim']
host_disk['decPhoSim_lens'] = host_disk['decPhoSim']
host_disk['raPhoSim'] = np.array(ra_images)[disk_valid]
host_disk['decPhoSim'] = np.array(dec_images)[disk_valid]
host_disk.head()
host_bulge.to_csv('../../data/%s_bulge_sne_host.csv' % catalog_version)
host_disk.to_csv('../../data/%s_disk_sne_host.csv' % catalog_version)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We generate samples from the following distribution
Step2: $x_0 = 10$
Step3: $x_0 = 17$
|
<ASSISTANT_TASK:>
Python Code:
# Example from section 29.4 & 29.6 (Fig 29.14 & 29.15) of https://www.inference.org.uk/itprnn/book.pdf
try:
import probml_utils as pml
except ModuleNotFoundError:
%pip install -qq git+https://github.com/probml/probml-utils.git
import probml_utils as pml
import matplotlib.pyplot as plt
import jax.numpy as jnp
import seaborn as sns
import os
pml.latexify(width_scale_factor=2, fig_height=2)
# create transition matrix
prob_vect = jnp.repeat(0.5, 20)
transition_mat = jnp.diag(prob_vect, -1) + jnp.diag(prob_vect, 1)
transition_mat = transition_mat.at[0, 0].set(0.5)
transition_mat = transition_mat.at[20, 20].set(0.5)
states = [0, 1, 2, 3, 10, 100, 200, 400]
MARKER_SIZE = 2 if pml.is_latexify_enabled() else 4
def calculate_q_at_state_no(state_no, p_initial, transition_mat):
transition_mat_at_state_no = jnp.linalg.matrix_power(transition_mat, state_no)
q = jnp.matmul(transition_mat_at_state_no, p_initial) # q = T^n @ p_initial; T is symmetric here, so this also equals p_initial @ T^n
return q
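# Sanity check (a small sketch, not part of the original example): the uniform
# distribution should be stationary for this symmetric chain, i.e. T @ u == u
# up to floating point error.
uniform = jnp.ones(21) / 21
assert jnp.allclose(jnp.matmul(transition_mat, uniform), uniform, atol=1e-6)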
def plot_stem(ax):
# note: reads the globals `q` and `state_no` set in the plotting loops below
markerline, stemlines, baseline = ax.stem(q, markerfmt="ro", linefmt="blue")
ax.set_yticks([])
ax.set_ylim(0, max(q) * 1.13)
ax.set_ylabel("$p_{" + str(state_no) + "}(x)$", fontdict={"rotation": 0}, labelpad=13)
baseline.set_linewidth(0)
stemlines.set_linewidth(1)
markerline.set_markersize(MARKER_SIZE)
sns.despine()
x_0 = 10 # initial state
p_initial = [0] * 21
p_initial[x_0] = 1
p_initial = jnp.array(p_initial)
p_initial
fig, axs = plt.subplots(len(states), 1)
axs[0].set_title(f"Initial Condition $x_0 = {x_0}$")
for ind, state_no in enumerate(states):
q = calculate_q_at_state_no(
state_no, p_initial, transition_mat
) # q is proposed probabilities for p(x) on particular markov state
plot_stem(axs[ind])
if state_no != states[-1]:
axs[ind].set_xticks([])
else:
axs[ind].set_xticks(range(0, 21, 5))
axs[ind].set_xlabel("$x$")
pml.savefig(f"Initial_state_{x_0}")
print(sum(q), 1 / 21, q) # q sums to 1 and, after many steps, each entry approaches the uniform value 1/21
x_0 = 17 # initial state
p_initial = [0] * 21
p_initial[x_0] = 1
p_initial = jnp.array(p_initial)
p_initial
fig, axs = plt.subplots(len(states), 1)
axs[0].set_title(f"Initial Condition $x_0 = {x_0}$")
for ind, state_no in enumerate(states):
ax = axs[ind]
q = calculate_q_at_state_no(state_no, p_initial, transition_mat)
plot_stem(ax)
if state_no != states[-1]:
axs[ind].set_xticks([])
else:
axs[ind].set_xticks(range(0, 21, 5))
axs[ind].set_xlabel("$x$")
pml.savefig(f"Initial_state_{x_0}")
print(sum(q), 1 / 21, q)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, define some helper functions. These are used for printing data later on and for rendering images to the HTML page.
Step2: Question 1
Step3: Question 2
Step4: Question 3
Step5: Question 4
Step6: Question 5
Step7: Question 6.a & 6.b
Step8: Question 7
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import seaborn as sns
import pandas as pd
from statsmodels.graphics.tsaplots import plot_acf
import scipy.stats
import matplotlib.pyplot as plt
from IPython.display import HTML, display
from io import BytesIO
from base64 import b64encode
import scipy.misc as smp
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator
from scipy.stats import norm
plt.rcParams['figure.figsize'] = (26, 13)
def printMatrix(data): #used to print matricies to HTML
display(HTML(
'<table><tr>{}</tr></table>'.format(
'</tr><tr>'.join(
'<td>{}</td>'
.format('</td><td>'
.join(str(_) for _ in row)) for row in data)
)
))
def printText(text):
display(HTML('<p>' + text + '<p>'))
def displayHTML(html):
display(HTML(html))
def drawImg(img):
b = BytesIO()
img.save(b, format='png')
displayHTML("<img src='data:image/png;base64,{0}'/>"
.format(b64encode(b.getvalue()).decode('utf-8')))
w1995 = np.genfromtxt('Data/1995.csv', delimiter=',')
w2000 = np.genfromtxt('Data/2000.csv', delimiter=',')
w2005 = np.genfromtxt('Data/2005.csv', delimiter=',')
w2010 = np.genfromtxt('Data/2010.csv', delimiter=',')
w2015 = np.genfromtxt('Data/2015.csv', delimiter=',')
weatherData = [w1995, w2000, w2005, w2010, w2015]
dataValues = np.zeros((4, 5))
counter = 0;
for year in weatherData:
dataValues[0, counter] = year[:, 3].min() #minimum of the daily minimum temperatures (column 3)
dataValues[1, counter] = year[:, 1].max() #maximum of the daily maximum temperatures (column 1)
dataValues[2, counter] = round(year[:, 2].mean(), 2) #mean of the daily average temperatures
dataValues[3, counter] = round(year[:, 2].std(), 2) #standard deviation of the daily average temperatures
counter = counter + 1;
displayHTML(pd.DataFrame(dataValues,
columns = ['1995', '2000', '2005', '2010', '2015'],
index = ['Minimum', 'Maximum', 'Mean', 'Standard Deviation']).to_html())
sns.set_style('whitegrid')
counter = 0;
for year in weatherData:
sns.distplot(year[:, 2], hist=False,
label=1995 + counter * 5,
axlabel="Temperature (C)")
counter = counter + 1;
plt.title('Probability Distribution Function')
plt.savefig("Figures/probability distribution function.pdf")
plt.show()
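# Note: seaborn deprecated distplot in v0.11; on newer installs the
# equivalent call would be sns.kdeplot(year[:, 2], label=...) — an
# assumption to check against your seaborn version.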
autoCorrelation = np.zeros((5, 5)) #each entry is the normalised cross-correlation between two years' average temperatures; the diagonal is 1 by construction
for x in range(0, 5):
for y in range(0, 5):
a = weatherData[x][0:365, 2]
b = weatherData[y][0:365, 2]
a = (a - np.mean(a)) / (np.std(a) * len(a))
b = (b - np.mean(b)) / (np.std(b))
autoCorrelation[x, y] = np.correlate(a, b)
columns = ['1995', '2000', '2005', '2010', '2015']
index = ['1995', '2000', '2005', '2010', '2015']
displayHTML(pd.DataFrame(autoCorrelation, columns, index).to_html())
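# The normalisation above makes np.correlate behave like a Pearson
# correlation; a quick cross-check of that assumption with numpy's built-in
# (values should match up to floating point error):
# avg_temps = np.stack([y[0:365, 2] for y in weatherData])
# print(np.corrcoef(avg_temps)) # should reproduce the matrix above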
counter = 0;
for year in weatherData:
if counter == 4: # the last year gets its own full-width figure
plot_acf(year[:, 2],
title = "Autocorrelation for {}"
.format(1995 + 5 * counter))
plt.savefig("Figures/Autocorrelation for 2015.pdf")
plt.show()
else:
if counter % 2 == 0: # start a new two-panel row for each pair of years
fig, axs = plt.subplots(1, 2)
plot_acf(year[:, 2],
title = "Autocorrelation for {}"
.format(1995 + 5 * counter),
ax=axs[counter % 2])
if counter % 2 == 1: # the row is full: save and display it
plt.savefig("Figures/Autocorrelation for {} and {}.pdf"
.format(1995 + 5 * counter - 5, 1995 + 5 * counter))
plt.show()
counter = counter + 1
fig, axs = plt.subplots(1, 1)
for year in weatherData:
plot_acf(year[:, 2],
title = "Autocorrelation for all 5 years together",
ax=axs)
plt.savefig("Figures/All years Autocorrelation.pdf")
plt.show()
binColours = [[0.139681, 0.311666, 0.550652], [0.276518, 0.539432, 0.720771],
[0.475102, 0.695344, 0.802081], [0.670448, 0.803486, 0.824645],
[0.809791, 0.848259, 0.777550], [0.861927, 0.803423, 0.673050],
[0.830690, 0.667645, 0.546349], [0.742023, 0.475176, 0.424114],
[0.613033, 0.281826, 0.306352], [0.450385, 0.157961, 0.217975]]
counter = 0;
for year in weatherData:
num = 10
binsLow = np.linspace(year[:, 3].min() - 0.1, year[:, 3].max() + 0.1, num)
binsHigh = np.linspace(year[:, 1].min() - 0.1, year[:, 1].max() + 0.1, num)
digitizedLow = np.digitize(year[:, 3], binsLow) #put the data into the bins
digitizedHigh = np.digitize(year[:, 1], binsHigh) #the daily maxima (column 1) belong in the high bins
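# np.digitize returns 1-based bin indices (0 would mean below the first
# edge); the -0.1/+0.1 padding on the edges keeps every value strictly
# inside the bins, so the indices stay within range of binColours.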
rows = 2 * len(year[:, 3])
image = np.zeros((rows, 451, 3)) #make a matrix to store the image
#itterate over each year's values from the above values and set pixels colours
for x in range(0, rows, 2):
image[x, 0:451] = binColours[int(digitizedLow[x // 2])] #even rows take the low-temperature colour
image[x + 1, 0:451] = binColours[int(digitizedHigh[x // 2])] #odd rows take the high-temperature colour
printText("Tempreture blanket for year: {}".format(1995 + 5 * counter))
displayHTML(pd.DataFrame(np.column_stack((binsLow.reshape(10, 1),
(binsHigh.reshape(10, 1)))),
columns=["Low bin", "High bin"]).to_html())
outputImage = smp.toimage(image)
outputImage.save('WeatherBlancketsOutput/{}.jpg'.format(1995 + 5 * counter))
drawImg(outputImage) #Draw image to screen, using custom draw function to put output in window
counter = counter + 1;
plt.rcParams['figure.figsize'] = (30, 20) #Make the figure for this question bigger
#define the variables to store the mean, std deviation,
# range for each day(temp), pdfs and a vector for number of days
meanDay = np.zeros((365, 1))
stdDay = np.zeros((365, 1))
dayArray = np.zeros((5, 1))
pdf = np.zeros((365, 365))
dayRange = range(0, 365, 1)
for day in dayRange:
for inx, year in enumerate(weatherData):
dayArray[inx, 0] = year[:, 2][day]
meanDay[day] = dayArray.mean() #calculate the mean
stdDay[day] = dayArray.std() # calculate the standard deviation
#generate a linear space over the temperature range, from the min to the max mean temperature
temperatureSpace = np.linspace(meanDay.min(),
meanDay.max(),
len(weatherData[0][:, 2]))
#iterate over the days again, now generating the pdf
for day in dayRange:
pdf[day, :] = norm.pdf(temperatureSpace,
meanDay[day],
stdDay[day])
#convert the values to a meshgrid
# (return coordinate matrices from coordinate vectors)
temperatureSpace, dayRange = np.meshgrid(temperatureSpace, dayRange)
#finally, plot it as a 3d surface
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(temperatureSpace,
dayRange,
pdf,
cmap=cm.coolwarm,
linewidth=0,
antialiased=False)
ax.set_xlabel('Temperature (C)')
ax.set_ylabel('Days')
ax.set_zlabel('f(x)')
#add a colour bar on the side to see magnitudes
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.title('Surface plot of stochastic process')
plt.savefig("Figures/surface plot of stochastic process.pdf")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The layout property can be shared between multiple widgets and assigned directly.
Step2: Description
Step3: You can change the length of the description to fit the description text. However, this will make the widget itself shorter. You can change both by adjusting the description width and the widget width using the widget's style.
Step4: If you need more flexibility to lay out widgets and descriptions, you can use Label widgets directly.
Step5: Natural sizes, and arrangements using HBox and VBox
Step6: Latex
Step7: Number formatting
Step10: Three buttons in an HBox. Items flex proportionally to their weight.
Step11: A more advanced example
Step12: A more advanced example
Step13: Predefined styles
Step14: The style attribute
Step15: You can get a list of the style attributes for a widget with the keys property.
Step16: Just like the layout attribute, widget styles can be assigned to other widgets.
Step17: Widget styling attributes are specific to each widget type.
Step18: The Grid layout
Step19: Placing items by name
Step20: Setting up row and column template and gap
Step22: Image layout and sizing
Step23: object_fit in a Box smaller than the original image
Step24: object_fit in a Box larger than the original image
Step25: It may be surprising, given the description of the values for object_fit, that in none of the cases does the image actually fill the box. The reason is that the underlying image is only 50 pixels wide, half the width of the box, so fill and cover mean "fill/cover the content box determined by the size of the image".
Step26: Examples of object_position
Step27: Specifying object_position with offsets in pixels
|
<ASSISTANT_TASK:>
Python Code:
from ipywidgets import Button, Layout
b = Button(description='(50% width, 80px height) button',
layout=Layout(width='50%', height='80px'))
b
Button(description='Another button with the same layout', layout=b.layout)
from ipywidgets import IntSlider
IntSlider(description='A too long description')
style = {'description_width': 'initial'}
IntSlider(description='A too long description', style=style)
from ipywidgets import HBox, Label
HBox([Label('A too long description'), IntSlider()])
from ipywidgets import Button, HBox, VBox
words = ['correct', 'horse', 'battery', 'staple']
items = [Button(description=w) for w in words]
left_box = VBox([items[0], items[1]])
right_box = VBox([items[2], items[3]])
HBox([left_box, right_box])
from ipywidgets import IntSlider, Label
IntSlider(description=r'\(\int_0^t f\)')
Label(value=r'\(e=mc^2\)')
from ipywidgets import Layout, Button, Box
items_layout = Layout(width='auto') # override the default width of the button to 'auto' to let the button grow
box_layout = Layout(display='flex',
flex_flow='column',
align_items='stretch',
border='solid',
width='50%')
words = ['correct', 'horse', 'battery', 'staple']
items = [Button(description=word, layout=items_layout, button_style='danger') for word in words]
box = Box(children=items, layout=box_layout)
box
from ipywidgets import Layout, Button, Box, VBox
# Items flex proportionally to the weight and the left over space around the text
items_auto = [
Button(description='weight=1; auto', layout=Layout(flex='1 1 auto', width='auto'), button_style='danger'),
Button(description='weight=3; auto', layout=Layout(flex='3 1 auto', width='auto'), button_style='danger'),
Button(description='weight=1; auto', layout=Layout(flex='1 1 auto', width='auto'), button_style='danger'),
]
# Items flex proportionally to the weight
items_0 = [
Button(description='weight=1; 0%', layout=Layout(flex='1 1 0%', width='auto'), button_style='danger'),
Button(description='weight=3; 0%', layout=Layout(flex='3 1 0%', width='auto'), button_style='danger'),
Button(description='weight=1; 0%', layout=Layout(flex='1 1 0%', width='auto'), button_style='danger'),
]
box_layout = Layout(display='flex',
flex_flow='row',
align_items='stretch',
width='70%')
box_auto = Box(children=items_auto, layout=box_layout)
box_0 = Box(children=items_0, layout=box_layout)
VBox([box_auto, box_0])
from ipywidgets import Layout, Button, Box, FloatText, Textarea, Dropdown, Label, IntSlider
form_item_layout = Layout(
display='flex',
flex_flow='row',
justify_content='space-between'
)
form_items = [
Box([Label(value='Age of the captain'), IntSlider(min=40, max=60)], layout=form_item_layout),
Box([Label(value='Egg style'),
Dropdown(options=['Scrambled', 'Sunny side up', 'Over easy'])], layout=form_item_layout),
Box([Label(value='Ship size'),
FloatText()], layout=form_item_layout),
Box([Label(value='Information'),
Textarea()], layout=form_item_layout)
]
form = Box(form_items, layout=Layout(
display='flex',
flex_flow='column',
border='solid 2px',
align_items='stretch',
width='50%'
))
form
from ipywidgets import Layout, Button, VBox, Label
item_layout = Layout(height='100px', min_width='40px')
items = [Button(layout=item_layout, description=str(i), button_style='warning') for i in range(40)]
box_layout = Layout(overflow='scroll hidden',
border='3px solid black',
width='500px',
height='',
flex_flow='row',
display='flex')
carousel = Box(children=items, layout=box_layout)
VBox([Label('Scroll horizontally:'), carousel])
from ipywidgets import Button
Button(description='Danger Button', button_style='danger')
b1 = Button(description='Custom color')
b1.style.button_color = 'lightgreen'
b1
b1.style.keys
b2 = Button()
b2.style = b1.style
b2
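# Because b2.style now references the same style object as b1.style,
# mutating it in one place updates both buttons; a quick check of that
# sharing (assumed standard traitlets behaviour):
# b1.style.button_color = 'salmon' # b2 turns salmon as well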
s1 = IntSlider(description='Blue handle')
s1.style.handle_color = 'lightblue'
s1
from ipywidgets import Button, GridBox, Layout, ButtonStyle
header = Button(description='Header',
layout=Layout(width='auto', grid_area='header'),
style=ButtonStyle(button_color='lightblue'))
main = Button(description='Main',
layout=Layout(width='auto', grid_area='main'),
style=ButtonStyle(button_color='moccasin'))
sidebar = Button(description='Sidebar',
layout=Layout(width='auto', grid_area='sidebar'),
style=ButtonStyle(button_color='salmon'))
footer = Button(description='Footer',
layout=Layout(width='auto', grid_area='footer'),
style=ButtonStyle(button_color='olive'))
GridBox(children=[header, main, sidebar, footer],
layout=Layout(
width='50%',
grid_template_rows='auto auto auto',
grid_template_columns='25% 25% 25% 25%',
grid_template_areas='''
"header header header header"
"main main . sidebar "
"footer footer footer footer"
''')
)
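# Note: the '.' in the grid_template_areas string above denotes an empty
# grid cell, following the CSS grid-template-areas convention.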
GridBox(children=[Button(layout=Layout(width='auto', height='auto'),
style=ButtonStyle(button_color='darkseagreen')) for i in range(9)
],
layout=Layout(
width='50%',
grid_template_columns='100px 50px 100px',
grid_template_rows='80px auto 80px',
grid_gap='5px 10px')
)
from ipywidgets import Layout, Box, VBox, HBox, HTML, Image
fit_options = ['contain', 'cover', 'fill', 'scale-down', 'none', None]
hbox_layout = Layout()
hbox_layout.width = '100%'
hbox_layout.justify_content = 'space-around'
green_box_layout = Layout()
green_box_layout.width = '100px'
green_box_layout.height = '100px'
green_box_layout.border = '2px solid green'
def make_box_for_grid(image_widget, fit):
'''Make a VBox to hold caption/image for demonstrating
object_fit values.'''
# Make the caption
if fit is not None:
fit_str = "'{}'".format(fit)
else:
fit_str = str(fit)
h = HTML(value=str(fit_str))
# Make the green box with the image widget inside it
boxb = Box()
boxb.layout = green_box_layout
boxb.children = [image_widget]
# Compose into a vertical box
vb = VBox()
vb.layout.align_items = 'center'
vb.children = [h, boxb]
return vb
# Use this margin to eliminate space between the image and the box
image_margin = '0 0 0 0'
# Set size of captions in figures below
caption_size = 'h4'
with open('images/gaussian_with_grid.png', 'rb') as f:
im_600_300 = f.read()
boxes = []
for fit in fit_options:
ib = Image(value=im_600_300)
ib.layout.object_fit = fit
ib.layout.margin = image_margin
boxes.append(make_box_for_grid(ib, fit))
vb = VBox()
h = HTML(value='<{size}>Examples of <code>object_fit</code> with large image</{size}>'.format(size=caption_size))
vb.layout.align_items = 'center'
hb = HBox()
hb.layout = hbox_layout
hb.children = boxes
vb.children = [h, hb]
vb
with open('images/gaussian_with_grid_tiny.png', 'rb') as f:
im_50_25 = f.read()
boxes = []
for fit in fit_options:
ib = Image(value=im_50_25)
ib.layout.object_fit = fit
ib.layout.margin = image_margin
boxes.append(make_box_for_grid(ib, fit))
vb = VBox()
h = HTML(value='<{size}>Examples of <code>object_fit</code> with small image</{size}>'.format(size=caption_size))
vb.layout.align_items = 'center'
hb = HBox()
hb.layout = hbox_layout
hb.children = boxes
vb.children = [h, hb]
vb
boxes = []
for fit in fit_options:
ib = Image(value=im_50_25)
ib.layout.object_fit = fit
ib.layout.margin = image_margin
# NOTE WIDTH IS SET TO 100%
ib.layout.width = '100%'
boxes.append(make_box_for_grid(ib, fit))
vb = VBox()
h = HTML(value='<{size}>Examples of <code>object_fit</code> with image '
'smaller than container</{size}>'.format(size=caption_size))
vb.layout.align_items = 'center'
hb = HBox()
hb.layout = hbox_layout
hb.children = boxes
vb.children = [h, hb]
vb
object_fit = 'none'
image_value = [im_600_300, im_50_25]
horz_keywords = ['left', 'center', 'right']
vert_keywords = ['top', 'center', 'bottom']
rows = []
for image, caption in zip(image_value, ['600 x 300 image', '50 x 25 image']):
cols = []
for horz in horz_keywords:
for vert in vert_keywords:
ib = Image(value=image)
ib.layout.object_position = '{horz} {vert}'.format(horz=horz, vert=vert)
ib.layout.margin = image_margin
ib.layout.object_fit = object_fit
# ib.layout.height = 'inherit'
ib.layout.width = '100%'
cols.append(make_box_for_grid(ib, ib.layout.object_position))
hb = HBox()
hb.layout = hbox_layout
hb.children = cols
rows.append(hb)
vb = VBox()
h1 = HTML(value='<{size}><code> object_position </code> by '
'keyword with large image</{size}>'.format(size=caption_size))
h2 = HTML(value='<{size}><code> object_position </code> by '
'keyword with small image</{size}>'.format(size=caption_size))
vb.children = [h1, rows[0], h2, rows[1]]
vb.layout.height = '400px'
vb.layout.justify_content = 'space-around'
vb.layout.align_items = 'center'
vb
object_fit = ['none', 'contain', 'fill', 'cover']
offset = '20px 10px'
image_value = [im_600_300]
boxes = []
for image, caption in zip(image_value, ['600 x 300 image', ]):
for fit in object_fit:
ib = Image(value=image)
ib.layout.object_position = offset
ib.layout.margin = image_margin
ib.layout.object_fit = fit
# ib.layout.height = 'inherit'
ib.layout.width = '100%'
title = 'object_fit: {}'.format(ib.layout.object_fit)
boxes.append(make_box_for_grid(ib, title))
vb = VBox()
h = HTML(value='<{size}><code>object_position</code> by '
'offset {offset} with several '
'<code>object_fit</code>s with large image</{size}>'.format(size=caption_size,
offset=offset))
vb.layout.align_items = 'center'
hb = HBox()
hb.layout = hbox_layout
hb.children = boxes
vb.children = [h, hb]
vb
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Re-root the tree at a chosen node
Step2: 2. Flatten tree
Step3: 3. Use regexp path queries over tree!
|
<ASSISTANT_TASK:>
Python Code:
def root_tree_at(new_root):
'''Given a node, remove all parents and add them as children
so that this node becomes the new root'''
# Check to see if the new root has any parents...
parents = new_root.xpath("..")
if len(parents) > 0:
p = root_tree_at(parents[0])
p.remove(new_root)
new_root.append(p)
return new_root
root = root_tree_at(root.xpath("//*[@cid='P1'][1]")[0])
t = XMLTree(root)
t.render_tree()
def flat_tree(root):
if root.get('dep_label') is not None:
s = '--%s--> %s' % (root.get('dep_label'), root.get('word'))
else:
s = root.get('word')
if len(root) > 0:
s += ' ( %s )' % ', '.join(filter(lambda x : x is not None, [flat_tree(c) for c in root]))
return s
ft = flat_tree(t.root)
print ft
re.search(r'[G](\s*--\w+-->\s*){0,5}[P]', ft)
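# The pattern appears to match a node tagged G followed by up to five
# dependency edges of the form --label-->, ending at a node tagged P,
# i.e. a G-to-P path of length at most 5 in the flattened tree string.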
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem statement
Step2: Now let's assume this did not finish at once but took some long time
Step3: Continue the search
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.random.seed(777)
from skopt import gp_minimize
from skopt import callbacks
from skopt.callbacks import CheckpointSaver
noise_level = 0.1
def obj_fun(x, noise_level=noise_level):
return np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2)) + np.random.randn() * noise_level
checkpoint_saver = CheckpointSaver("./checkpoint.pkl", compress=9) # keyword arguments will be passed to `skopt.dump`
gp_minimize(obj_fun, # the function to minimize
[(-20.0, 20.0)], # the bounds on each dimension of x
x0=[-20.], # the starting point
acq_func="LCB", # the acquisition function (optional)
n_calls=10, # the number of evaluations of f including at x0
n_random_starts=0, # the number of random initialization points
callback=[checkpoint_saver], # a list of callbacks including the checkpoint saver
random_state=777);
from skopt import load
res = load('./checkpoint.pkl')
res.fun
x0 = res.x_iters
y0 = res.func_vals
gp_minimize(obj_fun, # the function to minimize
[(-20.0, 20.0)], # the bounds on each dimension of x
x0=x0, # already examined values for x
y0=y0, # observed values for x0
acq_func="LCB", # the acquisition function (optional)
n_calls=10, # the number of evaluations of f including at x0
n_random_starts=0, # the number of random initialization points
callback=[checkpoint_saver],
random_state=777);
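# A small follow-up sketch (not in the original example): the checkpoint now
# holds the prior points plus the resumed evaluations, so reloading it
# exposes the combined best result.
res_final = load('./checkpoint.pkl')
print(len(res_final.x_iters), res_final.fun) # expect 20 points in total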
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cams', 'sandbox-2', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
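# Each question below follows the same pattern: DOC.set_id(...) selects a
# CMIP6 property, then DOC.set_value(...) records the answer. A filled-in
# sketch with a hypothetical value (replace with your model's details):
# DOC.set_id('cmip6.ocean.key_properties.model_name')
# DOC.set_value('MyOceanModel-1.0') # hypothetical name, for illustration only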
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
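# Illustrative only (hypothetical answer, not part of the original template):
# a completed property would look like
# DOC.set_value("Freshwater flux")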
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We are using TensorFlow-GPU 0.12.1 on Python 3.5.2, running on Windows 10 with Cuda 8.0.
Step2: Definitions
Step3: Data load
Step4: Model definition
Step5: Training and evaluation
Step6: Inspecting the result
Step7: Examples of correct predictions (tanh)
Step8: Examples of incorrect predictions (tanh)
Step9: Examples of correct predictions (relu)
Step10: Examples of incorrect predictions (relu)
Step11: Confusion matrix (tanh)
Step12: Confusion matrix (relu)
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
# We don't really need to import TensorFlow here since it's handled by Keras,
# but we do it in order to output the version we are using.
tf.__version__
import os.path
from IPython.display import Image
from util import Util
u = Util()
import numpy as np
# Explicit random seed for reproducibility
np.random.seed(1337)
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
from keras.datasets import mnist
batch_size = 512
nb_classes = 10
nb_epoch = 800
# checkpoint path
checkpoints_filepath_tanh = "checkpoints/02_MNIST_tanh_weights.best.hdf5"
checkpoints_filepath_relu = "checkpoints/02_MNIST_relu_weights.best.hdf5"
# model image path
model_image_path = 'images/model_02_MNIST.png' # saving only relu
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters1 = 20
nb_filters2 = 40
# size of pooling area for max pooling
pool_size1 = (2, 2)
pool_size2 = (3, 3)
# convolution kernel size
kernel_size1 = (4, 4)
kernel_size2 = (5, 5)
# dense layer size
dense_layer_size1 = 150
# dropout rate
dropout = 0.15
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
u.plot_images(X_train[0:9], y_train[0:9])
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
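# Illustrative: with nb_classes=10, a label of 3 becomes the one-hot row
# [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
print(Y_train[0])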
model_tanh = Sequential()
model_relu = Sequential()
def initialize_network_with_activation_function(model, activation, checkpoints_filepath):
model.add(Convolution2D(nb_filters1, kernel_size1[0], kernel_size1[1],
border_mode='valid',
input_shape=input_shape, name='covolution_1_' + str(nb_filters1) + '_filters'))
model.add(Activation(activation, name='activation_1_' + activation))
model.add(MaxPooling2D(pool_size=pool_size1, name='max_pooling_1_' + str(pool_size1) + '_pool_size'))
model.add(Convolution2D(nb_filters2, kernel_size2[0], kernel_size2[1]))
model.add(Activation(activation, name='activation_2_' + activation))
model.add(MaxPooling2D(pool_size=pool_size2, name='max_pooling_1_' + str(pool_size2) + '_pool_size'))
model.add(Dropout(dropout))
model.add(Flatten())
model.add(Dense(dense_layer_size1, name='fully_connected_1_' + str(dense_layer_size1) + '_neurons'))
model.add(Activation(activation, name='activation_3_' + activation))
model.add(Dropout(dropout))
model.add(Dense(nb_classes, name='output_' + str(nb_classes) + '_neurons'))
model.add(Activation('softmax', name='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy', 'precision', 'recall', 'mean_absolute_error'])
# loading weights from checkpoints
if os.path.exists(checkpoints_filepath):
model.load_weights(checkpoints_filepath)
initialize_network_with_activation_function(model_tanh, 'tanh', checkpoints_filepath_tanh)
initialize_network_with_activation_function(model_relu, 'relu', checkpoints_filepath_relu)
Image(u.maybe_save_network(model_relu, model_image_path), width=300)
# checkpoint
checkpoint_tanh = ModelCheckpoint(checkpoints_filepath_tanh, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list_tanh = [checkpoint_tanh]
# training
print('training tanh model')
history_tanh = model_tanh.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test), callbacks=callbacks_list_tanh)
# evaluation
print('evaluating tanh model')
score = model_tanh.evaluate(X_test, Y_test, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
print('Test error:', (1-score[1])*100, '%')  # error rate = 1 - accuracy
u.plot_history(history_tanh)
u.plot_history(history_tanh, metric='loss', loc='upper left')
# checkpoint
checkpoint_relu = ModelCheckpoint(checkpoints_filepath_relu, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list_relu = [checkpoint_relu]
# training
print('training relu model')
history_relu = model_relu.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test), callbacks=callbacks_list_relu)
# evaluation
print('evaluating relu model')
score = model_relu.evaluate(X_test, Y_test, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
print('Test error:', (1-score[1])*100, '%')  # error rate = 1 - accuracy
u.plot_history(history_relu)
u.plot_history(history_relu, metric='loss', loc='upper left')
# The predict_classes function outputs the highest probability class
# according to the trained classifier for each input example.
predicted_classes_tanh = model_tanh.predict_classes(X_test)
predicted_classes_relu = model_relu.predict_classes(X_test)
# Check which items we got right / wrong
correct_indices_tanh = np.nonzero(predicted_classes_tanh == y_test)[0]
incorrect_indices_tanh = np.nonzero(predicted_classes_tanh != y_test)[0]
correct_indices_relu = np.nonzero(predicted_classes_relu == y_test)[0]
incorrect_indices_relu = np.nonzero(predicted_classes_relu != y_test)[0]
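# Illustrative: overall miss counts give a quick tanh-vs-relu comparison
print('tanh errors:', len(incorrect_indices_tanh), '| relu errors:', len(incorrect_indices_relu))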
u.plot_images(X_test[correct_indices_tanh[:9]], y_test[correct_indices_tanh[:9]],
predicted_classes_tanh[correct_indices_tanh[:9]])
u.plot_images(X_test[incorrect_indices_tanh[:9]], y_test[incorrect_indices_tanh[:9]],
predicted_classes_tanh[incorrect_indices_tanh[:9]])
u.plot_images(X_test[correct_indices_relu[:9]], y_test[correct_indices_relu[:9]],
predicted_classes_relu[correct_indices_relu[:9]])
u.plot_images(X_test[incorrect_indices_relu[:9]], y_test[incorrect_indices_relu[:9]],
predicted_classes_relu[incorrect_indices_relu[:9]])
u.plot_confusion_matrix(y_test, nb_classes, predicted_classes_tanh)
u.plot_confusion_matrix(y_test, nb_classes, predicted_classes_relu)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From Mathematica
|
<ASSISTANT_TASK:>
Python Code:
def func(D, l, b, dD, dl, db):
q = 0.63
alpha = 2.42
rho0 = 5.6 / u.kpc**3
Rsun = 8. * u.kpc
x = D*np.cos(l)*np.cos(b) - Rsun
y = D*np.sin(l)*np.cos(b)
z = D*np.sin(b) / q
r = np.sqrt(x**2 + y**2 + z**2)
return D**2 * rho0 * (Rsun / r)**alpha * np.cos(b) * dD * dl.to(u.radian).value * db.to(u.radian).value
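# Illustrative spot-check (hypothetical inputs, commented out): the predicted
# counts in a single 1 kpc x 1 deg x 1 deg bin toward the Galactic anticenter
# func(10*u.kpc, 180*u.deg, 0*u.deg, 1*u.kpc, 1*u.deg, 1*u.deg)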
D_bin_edges = np.linspace(0., 30, 20)*u.kpc  # num must be an integer
D_bin_cntrs = (D_bin_edges[1:] + D_bin_edges[:-1]) / 2.
D_bin_size = D_bin_edges[1] - D_bin_edges[0]
# try to reproduce HerAq plot
dens = func(D_bin_cntrs, 40.*u.deg, 57.5*u.deg, D_bin_size, dl=80*u.deg, db=35.*u.deg) # HerAq
# dens = func(D_bin_cntrs, 130.*u.deg, -25*u.deg, D_bin_size) # TriAnd
plt.plot(D_bin_cntrs, dens)
bins = np.arange(0,40+2,2)
bin_ctr = (bins[1:]+bins[:-1])/2.
vals = np.array([3.63727, 17.6568, 31.8046, 41.6187, 47.4271, 50.4524, 51.7125, 51.892, 51.423, 50.5729, 49.5077, 48.33, 47.1042, 45.8698, 44.6517, 43.4645, 42.3168, 41.2132, 40.1556, 39.1443, 38.1786])
# this contains all Catalina RR Lyrae stars
# tbl = ascii.read("/Users/adrian/projects/streams/data/catalog/Catalina_all_RRLyr.txt")
# tbl.rename_column('RAdeg', 'ra')
# tbl.rename_column('DEdeg', 'dec')
# tbl.rename_column('dh', 'helio_dist')
# or:
tbl = ascii.read("/Users/adrian/Downloads/catalina.csv")
# tbl.remove_column("Num")
c = coord.SkyCoord(ra=tbl['ra'].data*u.deg, dec=tbl['dec'].data*u.deg)
gal = c.galactic
ix = (tbl['helio_dist'] < 21) & (tbl['helio_dist'] > 15)
plt.plot(gal.l.degree[ix],
gal.b.degree[ix],
linestyle='none')
plt.xlim(40,220)
plt.ylim(-75,0)
# box = [100,160,-35,-15]*u.degree
# box = [160,220,-35,-15]*u.degree # side box 1
box = [40,100,-40,-20]*u.degree # side box 2
ix = ((c.galactic.l > box[0]) & (c.galactic.l < box[1]) &
(c.galactic.b > box[2]) & (c.galactic.b < box[3]))
triand = tbl[ix].filled()
d = triand['helio_dist'].data
((d > 15) & (d < 21)).sum() / 0.8
plt.figure(figsize=(8,6))
# plt.errorbar(bin_ctr, vals[:-1], np.sqrt(vals[:-1]), ecolor='#777777', marker='o')
bins2 = np.arange(0,40+2,3)
plt.hist(d, bins=bins2, weights=np.ones_like(d)/0.7);
plt.xlabel(r"$d_{\rm helio}$")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating an Example Setup
Step2: Run the Example
Step3: Inputs and Outputs Overview
Step4: Inputs
Step5: Outputs
Step6: Other notable outputs
Step7: Trip matrices
Step8: Tracing calculations
Step9: Run the Multiprocessor Example
|
<ASSISTANT_TASK:>
Python Code:
!pip install activitysim
!activitysim create -e example_mtc -d example
%cd example
!activitysim run -c configs -d data -o output
import os
for root, dirs, files in os.walk(".", topdown=False):
for name in files:
print(os.path.join(root, name))
for name in dirs:
print(os.path.join(root, name))
print("Load libraries.")
import pandas as pd
import openmatrix as omx
import yaml
import glob
print("Display the settings file.\n")
with open(r'configs/settings.yaml') as file:
file_contents = yaml.load(file, Loader=yaml.FullLoader)
print(yaml.dump(file_contents))
print("Display the network_los file.\n")
with open(r'configs/network_los.yaml') as file:
file_contents = yaml.load(file, Loader=yaml.FullLoader)
print(yaml.dump(file_contents))
print("Input land_use. Primary key: TAZ. Required additional fields depend on the downstream submodels (and expression files).")
pd.read_csv("data/land_use.csv")
print("Input households. Primary key: HHID. Foreign key: TAZ. Required additional fields depend on the downstream submodels (and expression files).")
pd.read_csv("data/households.csv")
print("Input persons. Primary key: PERID. Foreign key: household_id. Required additional fields depend on the downstream submodels (and expression files).")
pd.read_csv("data/persons.csv")
print("Skims. All skims are input via one OMX file. Required skims depend on the downstream submodels (and expression files).\n")
print(omx.open_file("data/skims.omx"))
print("The output pipeline contains the state of each table after each model step.")
pipeline = pd.io.pytables.HDFStore('output/pipeline.h5')
pipeline.keys()
print("Households table after trip mode choice, which contains several calculated fields.")
pipeline['/households/joint_tour_frequency'] #watch out for key changes if not running all models
print("Final output households table to written to CSV, which is the same as the table in the pipeline.")
pd.read_csv("output/final_households.csv")
print("Final output persons table to written to CSV, which is the same as the table in the pipeline.")
pd.read_csv("output/final_persons.csv")
print("Final output tours table to written to CSV, which is the same as the table in the pipeline. Joint tours are stored as one record.")
pd.read_csv("output/final_tours.csv")
print("Final output trips table to written to CSV, which is the same as the table in the pipeline. Joint trips are stored as one record")
pd.read_csv("output/final_trips.csv")
print("Final output accessibility table to written to CSV.")
pd.read_csv("output/final_accessibility.csv")
print("Joint tour participants table, which contains the person ids of joint tour participants.")
pipeline['joint_tour_participants/joint_tour_participation']
print("Destination choice sample logsums table for school location if want_dest_choice_sample_tables=True.")
if '/school_location_sample/school_location' in pipeline:
pipeline['/school_location_sample/school_location']
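# Optional cleanup (illustrative): close the HDF store once done reading
# from it, so the pipeline file handle is released.
pipeline.close()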
print("trip matrices by time of day for assignment")
output_files = os.listdir("output")
for output_file in output_files:
if "omx" in output_file:
print(output_file)
print("All trace files.\n")
glob.glob("output/trace/*.csv")
print("Trace files for auto ownership.\n")
glob.glob("output/trace/auto_ownership*.csv")
print("Trace chooser data for auto ownership.\n")
pd.read_csv("output\\trace\\auto_ownership_simulate.simple_simulate.eval_mnl.choosers.csv")
print("Trace utility expression values for auto ownership.\n")
pd.read_csv("output\\trace\\auto_ownership_simulate.simple_simulate.eval_mnl.eval_utils.expression_values.csv")
print("Trace alternative total utilities for auto ownership.\n")
pd.read_csv("output\\trace\\auto_ownership_simulate.simple_simulate.eval_mnl.utilities.csv")
print("Trace alternative probabilities for auto ownership.\n")
pd.read_csv("output\\trace\\auto_ownership_simulate.simple_simulate.eval_mnl.probs.csv")
print("Trace random number for auto ownership.\n")
pd.read_csv("output\\trace\\auto_ownership_simulate.simple_simulate.eval_mnl.rands.csv")
print("Trace choice for auto ownership.\n")
pd.read_csv("output\\trace\\auto_ownership_simulate.simple_simulate.eval_mnl.choices.csv")
!activitysim run -c configs_mp -c configs -d data -o output
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Play with Multilabel classification format and f1-score
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc='upper left')
plt.figure(figsize=(8,6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True, random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False, random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", 'cca')
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", 'pca')
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
from sklearn.preprocessing import MultiLabelBinarizer
y_true = [[2,3,4], [2], [0,1,3], [0,1,2,3,4], [0,1,2]]
Y_true = MultiLabelBinarizer().fit_transform(y_true)
Y_true
y_pred = [[2,3], [2], [0,1,3], [0,1,3], [0,1,2]]
Y_pred = MultiLabelBinarizer(classes=[0,1,2,3,4]).fit_transform(y_pred)
from sklearn.metrics import f1_score
f1_score(y_pred=Y_pred, y_true=Y_true, average='macro')
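# For comparison (illustrative): the micro-averaged F1 pools true/false
# positives across all labels before computing the score.
f1_score(y_pred=Y_pred, y_true=Y_true, average='micro')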
Y_pred
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1:
Step2: Reproducing Figure 1B
|
<ASSISTANT_TASK:>
Python Code:
import cobra
from utils import findBiomarkers, show_map
import pandas as pd
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
model = cobra.io.read_sbml_model("models/Shlomi_example.xml")
# write a for loop here.
# tip: make use of model.reactions and of the .reaction attribute of each reaction
exchanges = [ rxn for rxn in model.reactions if rxn.products == [] ]
exchangesIds = [rxn.id for rxn in exchanges]
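# Quick sanity check (illustrative): how many exchange reactions were found?
print(len(exchanges), 'exchange reactions found, e.g.', exchangesIds[:3])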
findBiomarkers(model,fvaRxns=exchangesIds,mods=[model.reactions.R1.id],cutoff=0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 2
Step2: Now let's explore the data set itself.
Step3: There are no column names in the DataFrame. Let's add those.
Step4: Now we have a pandas DataFrame called bos containing all the data we want to use to predict Boston Housing prices. Let's create a variable called PRICE which will contain the prices. This information is contained in the target data.
Step5: EDA and Summary Statistics
Step6: Scatterplots
Step7: Histograms
Step8: Part 3
Step9: Let's see how our model actually fit our data. We can see below that there is a ceiling effect, we should probably look into that. Also, for large values of $Y$ we get underpredictions, most predictions are below the 45-degree gridlines.
Step10: Fitting Linear Regression using sklearn
Step11:
Step12: Predict Prices
Step13:
Step14: Part 4
|
<ASSISTANT_TASK:>
Python Code:
# special IPython command to prepare the notebook for matplotlib and other libraries
%matplotlib inline
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import sklearn
import seaborn as sns
# special matplotlib argument for improved plots
from matplotlib import rcParams
sns.set_style("whitegrid")
sns.set_context("poster")
from sklearn.datasets import load_boston
import pandas as pd
boston = load_boston()
boston.keys()
boston.data.shape
# Print column names
print(boston.feature_names)
# Print description of Boston housing data set
print(boston.DESCR)
bos = pd.DataFrame(boston.data)
bos.head()
bos.columns = boston.feature_names
bos.head()
print(boston.target.shape)
bos['PRICE'] = boston.target
bos.head()
bos.describe()
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.scatter(bos.CRIM, bos.PRICE)
plt.xlabel("Per capita crime rate by town (CRIM)")
plt.ylabel("Housing Price")
plt.title("Relationship between CRIM and Price")
# Answers to Part 2 Exercise Set 1
# Question 1) What kind of relationship do you see? e.g. positive, negative?
# linear? non-linear? Is there anything else strange or interesting about
# the data? What about outliers?
# I see a weak negative linear relationship. Yes, the data looks interesting in
# that its distribution appears to be positively skewed and has a few outliers.
# Part 2 Exercise Set 1
# Question 2: Create scatter plots between *RM* and *PRICE*, and PTRATIO and PRICE.
# Label your axes appropriately using human readable labels.
# Tell a story about what you see.
# Create scatter plots between *RM* and *PRICE*
plt.scatter(bos.RM, bos.PRICE)
plt.xlabel("Average Number of Rooms Per Dwelling (RM)")
plt.ylabel("Housing Price")
plt.title("Relationship between RM and Price")
# Part 2 Exercise Set 1:
# Create scatter plot between *PTRATIO* and *PRICE*
plt.scatter(bos.PTRATIO, bos.PRICE)
plt.xlabel("Pupil-Teacher Ratio by Town (PTRATIO)")
plt.ylabel("Housing Price")
plt.title("Relationship between PTRATIO and Price")
# Question 2 continuation: it appears that a positive linear
# relationship seemed to exist in the graph between average
# number of rooms per dwelling and housing price.
# your turn: create some other scatter plots
# scatter plot between *NOX* and *PRICE*
plt.scatter(bos.NOX, bos.PRICE)
plt.xlabel("Nitric Oxides Concentration (parts per 10 million) (NOX)")
plt.ylabel("Housing Price")
plt.title("Relationship between NOX and Price")
# Exercise 1: What are some other numeric variables of interest? Why do you think
# they are interesting? Plot scatterplots with these variables and
# PRICE (house price) and tell a story about what you see.
# In my opinion, other variables of interest would be nitric oxides
# concentration since it can describe pollutants in the area.
# Another is the column describing percent of black population
# that may describe neighborhood housing prices.
# your turn: create some other scatter plots
# Create a scatter plot between *NOX* and *PRICE*
plt.scatter(bos.B, bos.PRICE)
plt.xlabel("1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town (B)")
plt.ylabel("Housing Price")
plt.title("Relationship between B and Price")
# your turn: create some other scatter plots
# Create a scatter plot between *DIS* and *LSTAT*
plt.scatter(bos.DIS, bos.LSTAT)
plt.xlabel("weighted distances to five Boston employment centres (DIS)")
plt.ylabel("% lower status of the population")
plt.title("Relationship between DIS and LSTAT")
import seaborn as sns
sns.regplot(y="PRICE", x="RM", data=bos, fit_reg = True)
plt.hist(np.log(bos.CRIM))
plt.title("CRIM")
plt.xlabel("Crime rate per capita")
plt.ylabel("Frequencey")
plt.show()
# Part 2 Exercise 1: In the above histogram, we took the logarithm of the crime rate per
# capita. Repeat this histogram without taking the log.
plt.hist(bos.CRIM)
plt.title("CRIM")
plt.xlabel("Crime rate per capita")
plt.ylabel("Frequencey")
plt.show()
# Exercise 2 Question 1 continuation: What was the purpose of taking the log? What do we gain
# by making this transformation? What do you now notice about this variable that is not
# obvious without making the transformation?
# We usually take logarithms of variables that are multiplicatively related or, in other
# words, growing exponentially in time. By taking logarithms of variables before
# plotting the data, any exponential nature of the variables is taken out of the equation so
# that we can see the pattern in a linear model if that's the case. Logging, in short,
# is similar to deflation, so that a trend can be straightened out and a linear model
# can be fitted.
# Before taking the logarithm of the variable, it's obvious that it's exponential in nature.
# Part 2 Exercise 2:
# Plot the histogram for RM and PTRATIO against each other, along
# with the two variables you picked in the previous section. We
# are looking for correlations in predictors here.
import seaborn as sns
sns.set(color_codes=True)
sns.jointplot(bos.RM, bos.PTRATIO)
# Part 2 Exercise 2 Continuation:
# Plot the histogram for the two variables you picked in
# the previous section.
import seaborn as sns
sns.set(color_codes=True)
sns.jointplot(bos.NOX, bos.PRICE)
# Import regression modules
import statsmodels.api as sm
from statsmodels.formula.api import ols
# statsmodels works nicely with pandas dataframes
# The thing inside the "quotes" is called a formula, a bit on that below
m = ols('PRICE ~ RM',bos).fit()
print(m.summary())
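# Illustrative: the fitted intercept and RM slope can also be pulled out
# directly from the results object (m.params is a pandas Series).
m.params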
# Part 3 Exercise 1: Create a scatterplot between the predicted prices,
# available in m.fittedvalues (where m is the fitted model)
# and the original prices.
# Import regression modules
import statsmodels.api as sm
from statsmodels.formula.api import ols
# statsmodels works nicely with pandas dataframes
# The thing inside the "quotes" is called a formula, a bit on that below
m = ols('PRICE ~ RM',bos).fit()
# Create the scatter plot between predicted values and *PRICE*
plt.scatter(m.predict(), bos.PRICE)
plt.xlabel("Predicted Housing Price Based on Linear Regression")
plt.ylabel("Housing Price")
plt.title("Relationship between Predicted Price and Original Price")
from sklearn.linear_model import LinearRegression
X = bos.drop('PRICE', axis = 1)
# This creates a LinearRegression object
lm = LinearRegression()
lm
# Use all 13 predictors to fit linear regression model
lm.fit(X, bos.PRICE)
# Part 3 Exercise 2 Question:
# How would you change the model to not fit an intercept term?
# Would you recommend not having an intercept? Why or why not?
# To change the model to not fit an intercept term then
# we need to fit a linear regression through the origin (RTO).
# Using sklearn's LinearRegression function, I will have to set
# the fit_intercept parameter to False.
# As far as recommending whether to have an intercept or not,
# this would depend on the data set. Hocking (1996) and Adelman
# et al. (1994) have found that a careful change of data range
# and data size needs to be considered. For example, if the
# data is far from the origin then fitting through the origin
# might present a discontinuity from an otherwise linear
# function with a positive or negative intercept. If uncertain,
# then one might run a couple of diagnostics. Hahn (1977)
# suggested to run a fit with and without an intercept then
# compare the standard errors to decide whether OLS or RTO
# provides a superior fit.
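# A minimal sketch of the no-intercept variant (illustrative; the name
# lm_no_intercept is ours, not part of the original notebook):
lm_no_intercept = LinearRegression(fit_intercept=False)
lm_no_intercept.fit(X, bos.PRICE)
lm_no_intercept.intercept_  # 0.0 by construction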
print('Estimated intercept coefficient: {}'.format(lm.intercept_))
print('Number of coefficients: {}'.format(len(lm.coef_)))
# The coefficients
pd.DataFrame({'features': X.columns, 'estimatedCoefficients': lm.coef_})[['features', 'estimatedCoefficients']]
# first five predicted prices
lm.predict(X)[0:5]
# Part 3 Exercise Set III:
# Question 1: Histogram: Plot a histogram of all the predicted prices. Write a story
# about what you see. Describe the shape, center and spread of the distribution.
# Are there any outliers? What might be the reason for them? Should we do
# anything special with them?
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.hist(lm.predict(X))
plt.title("Linear Regression")
plt.xlabel("Predicted Prices")
plt.ylabel("Frequency")
plt.show()
# The graph appears to be symmetric and bell-shaped, showing a normal
# distribution. The center seems to be around 20 in the x-axis.
# The spread of the distribution is from -5 to 45. Yes, there
# are outliers in the form of negative valued prices.
# Part 3 Exercise Set III
# Question 2: Scatterplot: Let's plot the true prices compared to
# the predicted prices to see how they disagree
# (we did this with statsmodels before).
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Create the scatter plot between predicted values and *PRICE*
plt.scatter(lm.predict(X), bos.PRICE)
plt.xlabel("Predicted Housing Price Based on Linear Regression")
plt.ylabel("Housing Price")
plt.title("Relationship between Predicted Price and Original Price")
# Question 3: We have looked at fitting a linear model in both
# statsmodels and scikit-learn. What are the advantages
# and disadvantages of each based on your exploration?
# Based on the information provided by both packages,
# what advantage does statsmodels provide?
print(np.sum((bos.PRICE - lm.predict(X)) ** 2))
print(np.sum((lm.predict(X) - np.mean(bos.PRICE)) ** 2))  # explained sum of squares
# Part 3 Exercise Set IV:
# Question 1: Fit a linear regression model using only the Pupil-teacher
# ratio by town (PTRATIO) column and interpret the coefficients.
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
import pandas as pd
lm = LinearRegression()
lm.fit(X[['PTRATIO']], bos.PRICE)
print('Estimated intercept coefficient: {}'.format(lm.intercept_))
print('Number of coefficients: {}'.format(len(lm.coef_)))
# Exercise 2: Calculate (or extract) the R2 value. What does it tell you?
lm.score(X[['PTRATIO']], bos.PRICE)
# Exercise 3: Compute the F-statistic. What does it tell you?
m = ols('PRICE ~ PTRATIO',bos).fit()
print(m.summary())
# Part 3 Exercise Set V
# Fit a linear regression model using three independent variables
# 1) 'CRIM' (per capita crime rate by town)
# 2) 'RM' (average number of rooms per dwelling)
# 3) 'PTRATIO' (pupil-teacher ratio by town)
lm = LinearRegression()
lm.fit(X[['CRIM','RM','PTRATIO']], bos.PRICE)
# Calculate (or extract) the R2 value.
lm.score(X[['CRIM', 'RM', 'PTRATIO']], bos.PRICE)
# Compute the F-statistic.
m = ols('PRICE ~ CRIM + RM + PTRATIO',bos).fit()
print(m.summary())
# Part 4
# Find another variable (or two) to add to the model we built in Part 3.
# Compute the F-test comparing the two models as well as the AIC. Which model is better?
m = ols('PRICE ~ CRIM + RM + PTRATIO + NOX + TAX',bos).fit()
print(m.summary())
# Part 5 Exercise 1:
# Create a scatter plot of fitted values versus residuals
plt.scatter(m.fittedvalues, m.resid)
plt.ylabel("Fitted Values")
plt.xlabel("Normalized residuals")
# Part 5 Exercise 2:
# Construct a quantile plot of the residuals.
from scipy import stats
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
res = stats.probplot(m.resid, plot=ax)  # normal quantile plot of the residuals
ax.set_title("Normal Q-Q plot of the model residuals")
plt.show()
# Part 5 Exercise 3:
# What are some advantages and disadvantages of the fitted vs.
# residual and quantile plot compared to each other?
# Answer: The fitted vs. residual plot is the most frequently
# created plot in residual analysis. Its advantages are the ability
# to detect non-linearity, unequal error variances, and outliers.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Text
Step2: Replace Character (Method 1)
Step3: Replace Character (Method 2)
|
<ASSISTANT_TASK:>
Python Code:
# Import library
import re
# Create text
text_data = ['Interrobang. By Aishwarya Henriette',
'Parking And Going. By Karl Gautier',
'Today Is The night. By Jarek Prakash']
# Remove periods
remove_periods = [string.replace('.', '') for string in text_data]
# Show text
remove_periods
# Create function
def replace_letters_with_X(string: str) -> str:
return re.sub(r'[a-zA-Z]', 'X', string)
# Apply function
[replace_letters_with_X(string) for string in remove_periods]
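# Alternative sketch: the period removal above could also be done with re.sub,
# escaping the dot since it is a regex metacharacter.
[re.sub(r'\.', '', string) for string in text_data]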
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load NumPy data
Step2: Load from .npz file
Step3: Load NumPy arrays with tf.data.Dataset
Step4: Use the datasets
Step5: Build and train a model
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
DATA_URL = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz'
path = tf.keras.utils.get_file('mnist.npz', DATA_URL)
with np.load(path) as data:
train_examples = data['x_train']
train_labels = data['y_train']
test_examples = data['x_test']
test_labels = data['y_test']
train_dataset = tf.data.Dataset.from_tensor_slices((train_examples, train_labels))
test_dataset = tf.data.Dataset.from_tensor_slices((test_examples, test_labels))
BATCH_SIZE = 64
SHUFFLE_BUFFER_SIZE = 100
train_dataset = train_dataset.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)
test_dataset = test_dataset.batch(BATCH_SIZE)
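# Illustrative: peek at the shapes of a single batch
for images, labels in train_dataset.take(1):
  print(images.shape, labels.shape)  # (64, 28, 28), (64,)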
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10)
])
model.compile(optimizer=tf.keras.optimizers.RMSprop(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['sparse_categorical_accuracy'])
model.fit(train_dataset, epochs=10)
model.evaluate(test_dataset)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def countSetBits(n):
    # Counts the total number of set bits across the binary
    # representations of all integers from 0 through n.
    i = 0
    ans = 0
    while (1 << i) <= n:
        k = 0
        change = 1 << i
        for j in range(0, n + 1):
            ans += k
            if change == 1:
                k = not k
                change = 1 << i
            else:
                change -= 1
        i += 1
    return ans

if __name__ == "__main__":
    n = 17
    print(countSetBits(n))
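# Cross-check (illustrative): counting bits directly with bin() gives the same total
assert countSetBits(17) == sum(bin(j).count('1') for j in range(1, 18))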
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: List size
Step2: Concatenating lists
Step3: To modify the list you need to use +=
Step4: To append elements at the end of the list you can use list.append()
Step5: Lists can be nested
Step6: A list can be repeated using the * operator
Step7: List functions
Step8: list.remove(i) or del list[i] - Removes an item from a given position
Step9: list.pop([i]) - Removes the item at the given position and returns it. If no index is given, the last item is removed. (the brackets around i indicate that this parameter is optional)
Step10: lista.extend(L) - Extends the list by appending all the elements of L to it.
Step11: lista.index(x) - Returns the index (or position) of element x in the list
Step12: lista.count(x) - Counts the number of occurrences of element x in the list
Step13: lista.sort() - Sorts the list in-place. To learn more about in-place operations see this post by Rafael Correia.
Step14: sorted() - sorts sequences without being in-place
Step15: lista.reverse() - reverses the order of the list in-place.
Step16: reversed() - reverses the order of a sequence. Not in-place.
Step17: Iterating over lists
Step18: Iterating in reverse
Step19: Iterating over a sequence of numbers
Step20: More on the range(start, stop, step) function, which generates a list of numbers starting at start, going up to stop, advancing "step" at a time.
Step21: Iterating over the indices
Step22: List comprehension
Step23: Using list comprehensions it looks like this
Step24: Odd numbers
Step25: Which is the same as
Step26: Careful!
Step27: This happens because the inner list is created only once, when the correct approach is to create 5 independent lists. An elegant way to do this correctly is with a list comprehension
Step28: In the previous example we learned how to declare an empty matrix, but if we want to create a fixed-size matrix filled with a default value we use nested list comprehensions.
Step29: Exercises
Step30: Complete the function to find the smallest (minimum) value in a list
Step31: Now that you have done these wonderful exercises, let's look at some useful Python functions
Step32: min(sequence) and max(sequence) - return, respectively, the minimum and maximum element of the given sequence
Step33: all(sequence) - returns True if all the values in the sequence are truthy
Step34: any(sequence) - returns True if any value in the sequence is truthy
Step35: round(number, digits) - Rounds a number to the number of decimal places given to the function
Step36: Extra
Step37: Examples
Step38: Easier than
Step39: Now let's split the grades into two lists, one of passing grades (grade >= 5) and one of failing grades
Step40: With list comprehensions it is easy to filter the .txt files of a directory. For that we will use the listdir() function from the os module, which lists all the files in a directory, and the string method str.endswith(), which compares the end of a string with a substring.
Step41: Suppose we run a file-management system and a user uploads 5 large files. We need to check whether these files fit in the free space of the file system so they do not cause problems on the server. We also need to compute the percentage of the space that each file occupies.
Step42: Do these files fit in the given space?
Step43: These files take up more than 100% of the space, so they do not fit.
Step44: To test this function we would need to use a mock to change the effect of the os.listdir() function, so this time you will have to test your function yourself.
Step45: Tuples
Step46: Tuples cannot be modified
Step47: Tuples can also be created without the parentheses, since what really defines them are the commas
Step48: Which is the same as
Step49: Slices also work on tuples
Step50: We can make lists of tuples to store information that does not change, such as
Step51: Being a sequence, a tuple supports several operations, such as
Step52: Tuples can be nested
Step53: Tuples can contain mutable objects such as lists
Step54: The mutable objects inside a tuple can be modified
Step55: But not the tuple itself
Step56: Python "magic"
Step57: This allows iterating over sequences in a more semantic way, as in the following example.
Step58: Using tuple packing
Step59: Or alternatively
|
<ASSISTANT_TASK:>
Python Code:
numeros = [1, 2, 3, 4]
numeros
len(numeros) # the len function works for all sequences
numeros + [5, 6, 7, 8]
numeros
numeros += [5, 6, 7, 8]
numeros
numeros
numeros.append(9)
numeros
aninhada = [1, 2, 3, [4, 5, 6]]
aninhada
aninhada[3]
aninhada[3][2]
zeros = [0] * 10
zeros
[1, 2, 3] * 5
lista = [1, 2, 4, 5]
lista
lista.insert(2, 3)
lista
lista = [1, 2, 3, 4, 5]
del lista[0]
lista
lista.remove(3)
lista
lista = [1, 2, 3, 4, 5]
lista.pop()
lista
lista.pop(0)
lista
lista = [1, 2, 3]
lista
lista.extend([4, 5, 6])
lista
lista = [1, 2, 3, 4, 5]
lista.index(3)
lista.index(5)
lista = [1, 1, 2, 3, 3, 3]
lista.count(2)
lista.count(1)
lista.count(3)
lista = [3, 1, 7, 0, 6]
lista
lista.sort() # sorts the list in-place
lista
lista = [8, 2, 4, 1, 3]
sorted(lista) # sorts the list
lista # sorted does not change the list's value
s = 'dcba'
sorted(s) # sorts any sequence, not just lists
lista = [1, 2, 3, 4, 5]
lista.reverse() # reverses the list in-place
lista
lista = [1, 2, 3, 4, 5]
list(reversed(lista))
lista
s = 'abcdefg'
list(reversed(s)) # reverses any sequence, not just lists
numeros = [1, 2, 3, 4, 5]
for num in numeros:
print(num)
lista = ['sou uma string', -1.5, 10, 'outra string', False]
for elemento in reversed(lista): # using the reversed function!
print(elemento)
for i in range(5):
print(i)
intervalo = range(5, 10) # range from 5 to 9
list(intervalo)
intervalo = range(0, 50, 5) # range from 0 to 50 in steps of 5
list(intervalo)
intervalo = range(-10, -50, -7) # range from -10 to -50
list(intervalo)
lista = ['Estou', 'sem', 'criatividade', 'para', 'criar', 'lista']
for i in range(len(lista)):
print(i, lista[i])
numeros = [1, 2, 3, 4, 5, 6, 7]
quadrados = []
for numero in numeros:
quadrados.append(numero ** 2)
quadrados
numeros = [1, 2, 3, 4, 5, 6, 7]
quadrados = [numero ** 2 for numero in numeros] # more readable
quadrados
numeros = [1, 2, 3, 4, 5, 6, 7]
impares = [numero for numero in numeros if numero % 2 == 1]
impares
numeros = [1, 2, 3, 4, 5, 6, 7]
impares = []
for numero in numeros:
if numero % 2 == 1:
impares.append(numero)
impares
matriz = [[]] * 5
matriz
matriz[0].extend([3, 1])
matriz
matriz[2].pop()
matriz
matriz = [[] for _ in range(5)]
matriz
matriz[0].extend([3, 1])
matriz
matriz[0].pop()
matriz
[[0 for _ in range(5)] for _ in range(5)]
def soma(numeros):
soma = 0
for numero in numeros:
soma += numero
return soma
# Tests to check the correctness of the soma function
assert soma([3021.833, 8787.958, 8701.609, 9607.527, 2105.801]) == 32224.728000000003
assert soma([8612.141, -5273.932]) == 3338.209
assert soma([-8369.594, -5766.904, 7128.654]) == -7007.843999999999
assert soma([-6493.119, 647.921, -1889.898, 2814.045, 1443.082, -9671.503]) == -13149.472
assert soma([-4065.074, 8418.62]) == 4353.546
assert soma([2647.4, -3745.222, -1967.752]) == -3065.574
assert soma([-462.477, -4756.437, -6841.597]) == -12060.510999999999
assert soma([-9049.308, -8559.362]) == -17608.67
assert soma([-1311.291, 3207.467, -2685.757, 7758.034, 6556.007, 5677.477]) == 19201.936999999998
assert soma([7592.047, 8226.455, 5889.92, 3802.669]) == 25511.091
def minimo(numeros):
minimo = numeros[0]
for numero in numeros[1:]:
if numero < minimo:
minimo = numero
return minimo
# Tests to check the correctness of the minimo function
assert minimo([-6473.695, 438.199, 7959.093, 7842.664, 7233.021, -4149.884]) == -6473.695
assert minimo([6505.843, -7075.108]) == -7075.108
assert minimo([9151.244, -4605.586, -2920.8]) == -4605.586
assert minimo([-6586.813, -2264.44, 4460.277]) == -6586.813
assert minimo([1553.196, 1631.333]) == 1553.196
assert minimo([6876.253, 9349.21, -382.322]) == -382.322
assert minimo([9604.147, -378.112, 2574.795, -5597.589, 8751.16]) == -5597.589
assert minimo([2761.954, 4439.859, 9361.367, 884.972]) == 884.972
assert minimo([2989.882, 346.282, 9051.012, 4973.448, 1821.907]) == 346.282
assert minimo([4996.027, -5269.592]) == -5269.592
numeros = [1, 2, 3, 4]
sum(numeros)
numeros = [684.97, 514.68, 475.69, 194.07]
sum(numeros)
numeros = [1, 2, 3, 4]
min(numeros)
max(numeros)
min('abracadabra')
max('abracadabra')
seq = [True, True, False, False, True]
all(seq)
seq = [True] * 10
seq
all(seq)
numeros = [-2, 9, 0, 4, -7]
all(numeros)
seq = [False, True, False, False, True]
any(seq)
seq = [False] * 10
seq
any(seq)
numeros = [0, 9, 0, 0, -7]
any(numeros)
import random
random.random() # generates a random number between 0 and 1
numero = random.random() * 1000 # multiply by 1000 to get a number with 3 significant digits
numero
round(numero, 2)
round(numero, 5)
numero = 431637
round(numero, -1)
round(numero, -2)
round(numero, -3)
# randomly generating the grades
import random
notas = [random.random() * 10 for i in range(5)] # generates 5 random grades
notas = [round(nota, 1) for nota in notas] # rounds the grades to 1 decimal place
notas
media = sum(notas) / len(notas)
media
soma_notas = 0
for nota in notas:
soma_notas += nota
soma_notas / len(notas)
aprovados = [nota for nota in notas if nota >= 5]
aprovados
reprovados = [nota for nota in notas if nota < 5]
reprovados
import os
os.listdir('..') # listing files from the directory above this notebook's directory
[nome for nome in os.listdir('..') if nome.endswith('.txt')]
espaço_total = 1500000 # total free space of the file system, in KB
tamanhos_arquivos = [388428.5, 189310.1, 310710.3, 160701, 898853.6] # sizes in KB of the files uploaded by the user
porcentagens_uso = [tamanho / espaço_total for tamanho in tamanhos_arquivos]
porcentagens_uso = [round(porcentagem * 100, 2) for porcentagem in porcentagens_uso]
porcentagens_uso
sum(porcentagens_uso) <= 100
def lista_arquivos_por_extensao(diretorio, extensao):
...
def gera_matriz(m, n, default):
...
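# One possible solution sketch for the two exercise stubs above (illustrative,
# not the only correct answer; assumes `import os` from the earlier cell):
def lista_arquivos_por_extensao(diretorio, extensao):
    return [nome for nome in os.listdir(diretorio) if nome.endswith(extensao)]

def gera_matriz(m, n, default):
    return [[default for _ in range(n)] for _ in range(m)]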
coordenadas = (40.716667, -74)
coordenadas
type(coordenadas)
coordenadas[0]
coordenadas[1]
coordenadas[0] = 10.5931
tupla = 'a', 'b', 'c', 'd'
tupla
tupla = ('a', 'b', 'c', 'd')
tupla
tupla[:2]
cidades = [
('Nova Iorque', 'E.U.A', (40.716667, -74)),
('Tóquio', 'Japão', (35.683333, 139.683333)),
('São Paulo', 'Brasil', (-23.547778, -46.635833))
]
cidades
cidades[1]
cidades[1][1]
tupla = ('Abc', False, 100)
for item in tupla:
print(item)
all(tupla)
any(tupla)
valores = (34, 40.1, -10.404)
sum(valores)
min(valores)
max(valores)
len(valores)
dados = ('Cidadopolis', 'Paisópolis')
cidade = dados, (40.716667, -74) # nests dados and the coordinate pair inside the cidade tuple
cidade
t = ([1, 2, 3], [3, 2, 1])
t
t[0][0] = 100
t
t[1][2] = -5
t
t[0] = [1, 2, 3]
cidade = ('Nova Iorque', 'E.U.A', (40.716667, -74))
cidade
nome, pais, coordenadas = cidade
nome
pais
coordenadas
cidades = [
('Nova Iorque', 'E.U.A', (40.716667, -74)),
('Tóquio', 'Japão', (35.683333, 139.683333)),
('São Paulo', 'Brasil', (-23.547778, -46.635833))
]
for cidade in cidades:
print(cidade[1])
for nome, pais, coordenadas in cidades:
print(pais)
cidades
for nome, pais, (x, y) in cidades:
print(pais)
for nome, pais, (x, y) in cidades:
print(x)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Convolutional Variational Autoencoder
Step2: Load the MNIST dataset
Step3: Use tf.data to batch and shuffle the data
Step5: Define the encoder and decoder networks with tf.keras.Sequential
Step7: Define the loss function and the optimizer
Step8: Training
Step9: Display a generated image from the last training epoch
Step10: Display an animated GIF of all the saved images
Step12: Display a 2D manifold of digits from the latent space
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install tensorflow-probability
# to generate gifs
!pip install imageio
!pip install git+https://github.com/tensorflow/docs
from IPython import display
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
import PIL
import tensorflow as tf
import tensorflow_probability as tfp
import time
(train_images, _), (test_images, _) = tf.keras.datasets.mnist.load_data()
def preprocess_images(images):
images = images.reshape((images.shape[0], 28, 28, 1)) / 255.
return np.where(images > .5, 1.0, 0.0).astype('float32')
train_images = preprocess_images(train_images)
test_images = preprocess_images(test_images)
train_size = 60000
batch_size = 32
test_size = 10000
train_dataset = (tf.data.Dataset.from_tensor_slices(train_images)
.shuffle(train_size).batch(batch_size))
test_dataset = (tf.data.Dataset.from_tensor_slices(test_images)
.shuffle(test_size).batch(batch_size))
class CVAE(tf.keras.Model):
Convolutional variational autoencoder.
def __init__(self, latent_dim):
super(CVAE, self).__init__()
self.latent_dim = latent_dim
self.encoder = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(
filters=32, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Flatten(),
# No activation
tf.keras.layers.Dense(latent_dim + latent_dim),
]
)
self.decoder = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(latent_dim,)),
tf.keras.layers.Dense(units=7*7*32, activation=tf.nn.relu),
tf.keras.layers.Reshape(target_shape=(7, 7, 32)),
tf.keras.layers.Conv2DTranspose(
filters=64, kernel_size=3, strides=2, padding='same',
activation='relu'),
tf.keras.layers.Conv2DTranspose(
filters=32, kernel_size=3, strides=2, padding='same',
activation='relu'),
# No activation
tf.keras.layers.Conv2DTranspose(
filters=1, kernel_size=3, strides=1, padding='same'),
]
)
@tf.function
def sample(self, eps=None):
if eps is None:
eps = tf.random.normal(shape=(100, self.latent_dim))
return self.decode(eps, apply_sigmoid=True)
def encode(self, x):
mean, logvar = tf.split(self.encoder(x), num_or_size_splits=2, axis=1)
return mean, logvar
def reparameterize(self, mean, logvar):
eps = tf.random.normal(shape=mean.shape)
return eps * tf.exp(logvar * .5) + mean
def decode(self, z, apply_sigmoid=False):
logits = self.decoder(z)
if apply_sigmoid:
probs = tf.sigmoid(logits)
return probs
return logits
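# Illustrative usage sketch (commented out; shapes assume the class above):
# vae = CVAE(latent_dim=2)
# mean, logvar = vae.encode(tf.zeros([1, 28, 28, 1]))
# z = vae.reparameterize(mean, logvar)
# x_hat = vae.decode(z)  # -> shape (1, 28, 28, 1)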
optimizer = tf.keras.optimizers.Adam(1e-4)
def log_normal_pdf(sample, mean, logvar, raxis=1):
log2pi = tf.math.log(2. * np.pi)
return tf.reduce_sum(
-.5 * ((sample - mean) ** 2. * tf.exp(-logvar) + logvar + log2pi),
axis=raxis)
def compute_loss(model, x):
mean, logvar = model.encode(x)
z = model.reparameterize(mean, logvar)
x_logit = model.decode(z)
cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=x)
logpx_z = -tf.reduce_sum(cross_ent, axis=[1, 2, 3])
logpz = log_normal_pdf(z, 0., 0.)
logqz_x = log_normal_pdf(z, mean, logvar)
return -tf.reduce_mean(logpx_z + logpz - logqz_x)
@tf.function
def train_step(model, x, optimizer):
    Executes one training step.
This function computes the loss and gradients, and uses the latter to
update the model's parameters.
with tf.GradientTape() as tape:
loss = compute_loss(model, x)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
epochs = 10
# set the dimensionality of the latent space to a plane for visualization later
latent_dim = 2
num_examples_to_generate = 16
# keeping the random vector constant for generation (prediction) so
# it will be easier to see the improvement.
random_vector_for_generation = tf.random.normal(
shape=[num_examples_to_generate, latent_dim])
model = CVAE(latent_dim)
def generate_and_save_images(model, epoch, test_sample):
mean, logvar = model.encode(test_sample)
z = model.reparameterize(mean, logvar)
predictions = model.sample(z)
fig = plt.figure(figsize=(4, 4))
for i in range(predictions.shape[0]):
plt.subplot(4, 4, i + 1)
plt.imshow(predictions[i, :, :, 0], cmap='gray')
plt.axis('off')
# tight_layout minimizes the overlap between 2 sub-plots
plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
plt.show()
# Pick a sample of the test set for generating output images
assert batch_size >= num_examples_to_generate
for test_batch in test_dataset.take(1):
test_sample = test_batch[0:num_examples_to_generate, :, :, :]
generate_and_save_images(model, 0, test_sample)
for epoch in range(1, epochs + 1):
start_time = time.time()
for train_x in train_dataset:
train_step(model, train_x, optimizer)
end_time = time.time()
loss = tf.keras.metrics.Mean()
for test_x in test_dataset:
loss(compute_loss(model, test_x))
elbo = -loss.result()
display.clear_output(wait=False)
print('Epoch: {}, Test set ELBO: {}, time elapse for current epoch: {}'
.format(epoch, elbo, end_time - start_time))
generate_and_save_images(model, epoch, test_sample)
def display_image(epoch_no):
return PIL.Image.open('image_at_epoch_{:04d}.png'.format(epoch_no))
plt.imshow(display_image(epoch))
plt.axis('off') # Display images
anim_file = 'cvae.gif'
with imageio.get_writer(anim_file, mode='I') as writer:
filenames = glob.glob('image*.png')
filenames = sorted(filenames)
for filename in filenames:
image = imageio.imread(filename)
writer.append_data(image)
image = imageio.imread(filename)
writer.append_data(image)
import tensorflow_docs.vis.embed as embed
embed.embed_file(anim_file)
def plot_latent_images(model, n, digit_size=28):
  """Plots n x n digit images decoded from the latent space."""
norm = tfp.distributions.Normal(0, 1)
grid_x = norm.quantile(np.linspace(0.05, 0.95, n))
grid_y = norm.quantile(np.linspace(0.05, 0.95, n))
image_width = digit_size*n
image_height = image_width
image = np.zeros((image_height, image_width))
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
z = np.array([[xi, yi]])
x_decoded = model.sample(z)
digit = tf.reshape(x_decoded[0], (digit_size, digit_size))
image[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit.numpy()
plt.figure(figsize=(10, 10))
plt.imshow(image, cmap='Greys_r')
plt.axis('Off')
plt.show()
plot_latent_images(model, 20)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Load the data
Step2: 2. Inspect the data
Step3: 3. One-hot encode the output
Step4: We will also one-hot encode the output labels.
Step5: 4. Build the model
Step6: 5. Train the model
Step7: 6. Evaluate the model
|
<ASSISTANT_TASK:>
Python Code:
# Imports
import numpy as np
import keras
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.preprocessing.text import Tokenizer
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(42)
# Loading the data (it's preloaded in Keras)
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=1000)
print(x_train.shape)
print(x_test.shape)
print(x_train[0])
print(y_train[0])
# One-hot encoding the output into vector mode, each of length 1000
tokenizer = Tokenizer(num_words=1000)
x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')
x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')
print(x_train[0])
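# Quick illustration with a hypothetical toy sequence: 'binary' mode only
# marks word presence, so repeated indices collapse to a single 1.
toy_tokenizer = Tokenizer(num_words=5)
print(toy_tokenizer.sequences_to_matrix([[1, 1, 3]], mode='binary'))
# expected: [[0. 1. 0. 1. 0.]]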
# One-hot encoding the output
num_classes = 2
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print(y_train.shape)
print(y_test.shape)
# TODO: Build the model architecture
model = Sequential()
model.add(Dense(128, input_dim=1000))  # inputs are 1000-dim one-hot word vectors
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))  # two output classes
model.add(Activation('softmax'))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy'])
# TODO: Compile the model using a loss function and an optimizer.
model.summary()
# TODO: Run the model. Feel free to experiment with different batch sizes and number of epochs.
model.fit(x_train, y_train, epochs=10, batch_size=32, verbose=0)
score = model.evaluate(x_test, y_test, verbose=0)
print("Accuracy: ", score[1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 0. General note
Step2: 3. Compare
Step3: <img src='./tables/Jamieson_Au_1.png'>
|
<ASSISTANT_TASK:>
Python Code:
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import numpy as np
from uncertainties import unumpy as unp
import pytheos as eos
eta = np.linspace(0., 0.225, 46)
print(eta)
jamieson_aul = eos.gold.Jamieson1982L()
jamieson_auh = eos.gold.Jamieson1982H()
jamieson_aul.print_equations()
jamieson_auh.print_equations()
jamieson_aul.print_parameters()
jamieson_auh.print_parameters()
v0 = 67.84747902176544
jamieson_aul.three_r
jamieson_auh.three_r
v = v0 * (1.-eta)
temp = 1500.
p = jamieson_auh.cal_p(v, temp * np.ones_like(v))
print('for T = ', temp)
for eta_i, p_i in zip(eta, p):
print("{0: .3f} {1: .2f}".format(eta_i, p_i))
v = jamieson_auh.cal_v(p, temp * np.ones_like(p), min_strain=0.6)
print(1.-(v/v0))
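# Round-trip check (illustrative): cal_v inverted cal_p above, so the
# recovered compressions should reproduce the original eta grid closely.
print(np.max(np.abs((1. - v / v0) - eta)))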
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This PGM illustrates the joint PDF for the parameters and the data, which can be factorised as
Step2: Good. Here's the code that is being run, inside the "XrayData" class
Step3: To normalize this, we need to take care not to try and exponentiate any very large or small numbers...
Step4: Let's plot this as a 2D probability density map.
|
<ASSISTANT_TASK:>
Python Code:
# import cluster_pgm
# cluster_pgm.inverse()
from IPython.display import Image
Image(filename="cluster_pgm_inverse.png")
%load_ext autoreload
%autoreload 2
import cluster
lets = cluster.XrayData()
lets.read_in_data()
lets.set_up_maps()
x0,y0 = 328,328 # The center of the image is 328,328
S0,b = 0.001,1e-6 # Cluster and background surface brightness, arbitrary units
beta = 2.0/3.0 # Canonical value is beta = 2/3
rc = 12 # Core radius, in pixels
logprob = lets.evaluate_unnormalised_log_posterior(x0,y0,S0,rc,beta,b)
print logprob
import numpy as np
npix = 15
xmin,xmax = 327.7,328.3
ymin,ymax = 346.4,347.0
x0grid = np.linspace(xmin,xmax,npix)
y0grid = np.linspace(ymin,ymax,npix)
logprob = np.zeros([npix,npix])
for i,x0 in enumerate(x0grid):
for j,y0 in enumerate(y0grid):
logprob[j,i] = lets.evaluate_unnormalised_log_posterior(x0,y0,S0,rc,beta,b)
print "Done column",i
print logprob[0:5,0]
Z = np.max(logprob)
prob = np.exp(logprob - Z)
norm = np.sum(prob)
prob /= norm
print prob[0:5,0]
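# Sanity check: after normalisation the posterior grid should sum to unity.
print np.sum(prob)  # expected: 1.0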
import astropy.visualization as viz
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 10.0)
plt.imshow(prob, origin='lower', cmap='Blues', interpolation='gaussian', extent=[xmin,xmax,ymin,ymax])
plt.xlabel('x / pixels')
plt.ylabel('y / pixels')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'hadgem3-gc31-mh', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate some synthetic data
Step2: Train Isolation Forest with a validation set
Step3: The trained model will have different kinds of metrics for the training and validation frames. For training - where we don't have labeled data - anomaly metrics will be returned. For the validation frame we will see binomial model metrics.
Step4: The output includes the predicted class of the observation (not anomaly/anomaly). This is accomplished by using the validation frame. In the current implementation we pick the threshold that maximizes the F1 score.
Step5: Train Isolation Forest using contamination parameter
|
<ASSISTANT_TASK:>
Python Code:
import sys
import h2o
from h2o.frame import H2OFrame
import numpy as np
import pandas as pd
h2o.init(strict_version_check=False)
N = 1000
cont = 0.05 # ratio of outliers/anomalies
regular_data = np.random.normal(0, 0.5, (int(N*(1-cont)), 2))
anomaly_data = np.column_stack((np.random.normal(-1.5, 1, int(N*cont)), np.random.normal(1.5, 1, int(N*cont))))
import matplotlib.pyplot as plt
plt.scatter(anomaly_data[:,0], anomaly_data[:,1])
plt.scatter(regular_data[:,0], regular_data[:,1])
plt.show()
regular_pd = pd.DataFrame({'x': regular_data[:, 0], 'y': regular_data[:, 1], 'label': np.zeros(regular_data.shape[0])})
anomaly_pd = pd.DataFrame({'x': anomaly_data[:, 0], 'y': anomaly_data[:, 1], 'label': np.ones(anomaly_data.shape[0])})
dataset = H2OFrame(regular_pd.append(anomaly_pd).sample(frac=1))
train_with_label, test = dataset.split_frame([0.8])
train_with_label["label"].table()
test["label"].table()
train = train_with_label.drop(["label"])
test["label"] = test["label"].asfactor()
from h2o.estimators.isolation_forest import H2OIsolationForestEstimator
from h2o.model.metrics_base import H2OAnomalyDetectionModelMetrics, H2OBinomialModelMetrics
if_model = H2OIsolationForestEstimator(seed=12, ntrees=200,
score_tree_interval=7, stopping_rounds=3, stopping_metric="mean_per_class_error",
validation_response_column="label")
if_model.train(training_frame=train, validation_frame=test)
if_model
predicted = if_model.predict(train)
predicted.head()
predicted_train_labels = predicted["predict"].as_data_frame(use_pandas=True)
train_pd = train.as_data_frame(use_pandas=True)
plt.scatter(train_pd["x"], train_pd["y"], c=predicted_train_labels["predict"])
plt.show()
if_model.model_performance(train_with_label)
if_model_cont = H2OIsolationForestEstimator(seed=12, contamination=cont)
if_model_cont.train(training_frame=train)
if_model_cont
predicted_train_labels_cont = if_model_cont.predict(train)["predict"].as_data_frame(use_pandas=True)
plt.scatter(train_pd["x"], train_pd["y"], c=predicted_train_labels_cont["predict"])
plt.show()
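# Illustrative comparison (added, not part of the tutorial): fraction of
# training rows on which the validation-tuned and contamination-based models
# agree about the anomaly label (cast to str since the two predict columns
# may have different dtypes).
agreement = (predicted_train_labels["predict"].astype(str)
             == predicted_train_labels_cont["predict"].astype(str)).mean()
print("Agreement between the two models:", agreement)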
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We then grab all the images in the folder with the data block API. We don't create a validation set here for reasons we'll explain later. It consists of random noise of size 100 by default (can be changed if you replace generate_noise by partial(generate_noise, size=...)) as inputs and the images of bedrooms as targets.
Step2: We'll begin with a small size since GANs take a lot of time to train.
Step3: Models
|
<ASSISTANT_TASK:>
Python Code:
path = untar_data(URLs.LSUN_BEDROOMS)
dblock = DataBlock(blocks = (TransformBlock, ImageBlock),
get_x = generate_noise,
get_items = get_image_files,
splitter = IndexSplitter([]))
def get_dls(bs, size):
dblock = DataBlock(blocks = (TransformBlock, ImageBlock),
get_x = generate_noise,
get_items = get_image_files,
splitter = IndexSplitter([]),
item_tfms=Resize(size, method=ResizeMethod.Crop),
batch_tfms = Normalize.from_stats(torch.tensor([0.5,0.5,0.5]), torch.tensor([0.5,0.5,0.5])))
return dblock.dataloaders(path, path=path, bs=bs)
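# Note (added): Normalize.from_stats with mean 0.5 / std 0.5 rescales pixels
# from [0, 1] to [-1, 1], the output range a tanh-ended GAN generator
# typically assumes.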
dls = get_dls(128, 64)
dls.show_batch(max_n=16)
generator = basic_generator(64, n_channels=3, n_extra_layers=1)
critic = basic_critic (64, n_channels=3, n_extra_layers=1, act_cls=partial(nn.LeakyReLU, negative_slope=0.2))
learn = GANLearner.wgan(dls, generator, critic, opt_func = partial(Adam, mom=0.))
learn.recorder.train_metrics=True
learn.recorder.valid_metrics=False
learn.fit(30, 2e-4, wd=0)
#learn.gan_trainer.switch(gen_mode=True)
learn.show_results(max_n=16, figsize=(8,8), ds_idx=0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Solution
Step2: Question 2.b
Step3: Question 2.c
Step4: Question 3 [40%]
Step5: Question 3.a
Step6: Question 3.b
Step7: Question 3.c
|
<ASSISTANT_TASK:>
Python Code:
# n is an integer
def f(n):
l = []
for i in range(len(str(n))-1,-1,-1):
x = n/(10**i)
l.append(x)
n=n %(10**i)
return l
f(421)
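# Note: f relies on Python 2 integer division (n / 10**i); under Python 3 it
# would need n // (10**i). Expected output for f(421): [4, 2, 1].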
# Loading the data
clientes = [('Don Ramon', 3500, (9, 4, 2014)),
('Miguel', 2785, (30,10, 2014)),
('Cesar', 100, (28, 5, 2015)), # ...
]
def deuda_total(clientes):
total = 0
for nombre, deuda, fecha in clientes:
total += deuda
return total
deuda_total(clientes)
def mayor_deudor(clientes, ultimo):
nombre_mayor_deudor = ""
deuda_mayor_deudor = -float("inf")
for nombre, deuda, (dia, mes, anno) in clientes:
if anno==ultimo and deuda>deuda_mayor_deudor:
nombre_mayor_deudor=nombre
deuda_mayor_deudor=deuda
return nombre_mayor_deudor
print mayor_deudor(clientes, 2014)
print mayor_deudor(clientes, 2015)
print mayor_deudor(clientes, 2016)
# REMOVE AN ELEMENT FROM A TUPLE
tupla = ("zero", "uno","dos","tres","cuatro")
# Not possible directly; a new tuple has to be built
tupla = (tupla[0], tupla[2], tupla[3], tupla[4])
print tupla
# REMOVE AN ELEMENT FROM A LIST
lista = ["zero", "uno","dos","tres","cuatro"]
lista.pop(1)
print lista
# REMOVE AN ELEMENT FROM A DICTIONARY
diccio = {"zero":0, "uno":1,"dos":2,"tres":3,"cuatro":4}
del diccio["uno"]
print diccio
# REMOVE AN ELEMENT FROM A SET
conjunto = {"zero", "uno","dos","tres","cuatro"}
conjunto.remove("uno")
print conjunto
def pagar(clientes, pago):
nombre_pagador, monto_pago, fecha_pago = pago
for i in range(len(clientes)):
nombre, deuda, fecha = clientes[i]
if nombre_pagador==nombre:
deuda_restante = deuda - monto_pago
if deuda_restante<=0:
clientes.pop(i)
else:
deuda_actualizada = (nombre, deuda_restante, fecha_pago)
clientes[i] = deuda_actualizada
return clientes
clientes = [('Don Ramon', 3500, (9, 4, 2014)),
('Miguel', 2785, (30,10, 2014)),
('Cesar', 100, (28, 5, 2015)), # ...
]
print pagar(clientes, ('Miguel', 85, (3, 6, 2015)))
print pagar(clientes, ('Cesar', 100, (4, 6, 2015)))
def pagar(clientes, pago):
nombre_pagador, monto_pago, fecha_pago = pago
for cliente in clientes:
nombre, deuda, fecha = cliente
if nombre_pagador==nombre:
deuda_restante = deuda - monto_pago
if deuda_restante<=0:
clientes.remove(cliente)
else:
deuda_actualizada = (nombre, deuda_restante, fecha_pago)
cliente = deuda_actualizada
return clientes
clientes = [('Don Ramon', 3500, (9, 4, 2014)),
('Miguel', 2785, (30,10, 2014)),
('Cesar', 100, (28, 5, 2015)), # ...
]
print pagar(clientes, ('Miguel', 85, (3, 6, 2015)))
print pagar(clientes, ('Cesar', 100, (4, 6, 2015)))
# LOAD THE DATA
# name: [description, age, ability, (min-power, max-power)]
superheroes = {
'Iron man': ['mk42', 50 , 'uni-rayo', (45, 95)],
'Thor': ['hijo de odin', 10000 , 'mjolnir', (50, 100)],
'Condorito':['de pelotillehue', 40, 'washington', (1, 10)],
'Chapulin Colorado': ['no contaban con mi astucia', 40, 'chipote chillon', (40, 90)],
# ...
}
# name : set of associate names
asociados = {
'Iron man': set(['Thor', 'Black Widow', 'Hawkeye', 'Hulk']),
'Thor': set(['Iron man', 'Hulk', 'Chapulin Colorado']),
'Condorito' : set(['Don Chuma', 'Hulk']),
'Chapulin Colorado' : set(['Condorito', 'Thor']),
# ...
}
def diferencias_poder(superheroes, diferenciapoder, umbral):
superheroes_seleccionados = []
for nombre, caracteristicas in superheroes.items():
detalle, edad, habilidad, (min_poder, max_poder) = caracteristicas
if max_poder-min_poder>=diferenciapoder and min_poder>umbral:
tupla_datos = (nombre, detalle, habilidad, max_poder)
superheroes_seleccionados.append( tupla_datos )
return superheroes_seleccionados
diferencias_poder(superheroes, 30, 39)
def amigos(asociados):
amigos = {}
for nombre, conjunto_socios in asociados.items():
for nombre_socio in conjunto_socios:
            # Check whether they are friends
if (nombre_socio in asociados) and \
(nombre in asociados[nombre_socio]):
                # If they are friends, add them to the dictionary
if nombre not in amigos:
amigos[nombre] = set([nombre_socio])
else:
amigos[nombre].add(nombre_socio)
return amigos
print amigos(asociados)
def versus(superheroes, asociados):
    # Build the friends dictionary
amigos_dict = amigos(asociados)
    # Candidates to fight, but with too much information
lista_diferencias_poder = diferencias_poder(superheroes, 30, 39)
    # Candidates to fight, names only
superheroes_poderosos = []
for nombre, _, _, _ in lista_diferencias_poder:
superheroes_poderosos.append(nombre)
    # Iterate and add to the list (each pair only once)
lista_versus = []
for nombre_1 in superheroes_poderosos:
for nombre_2 in amigos_dict[nombre_1]:
if nombre_2 in superheroes_poderosos:
tupla_a_agregar = (nombre_2, nombre_1)
tupla_quizas_presente = (nombre_1, nombre_2)
if tupla_quizas_presente not in lista_versus:
lista_versus.append(tupla_a_agregar)
return lista_versus
print versus(superheroes, asociados)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we need to load our data.
Step2: A faster and more concise alternative, using python's list comprehension abilities, would look like this instead
Step3: Either way, we end up with data in the form shown below.
Step4: As you can see, there's one row per trial,
Step5: At this stage, we have our data in a format python can understand, and it looks like this.
Step6: We still need to do some preprocessing of the trajectories - OpenSesame logs y-axis coordinates upside down from what we would want, and more importantly, it's conventional to standardise trajectories so they start at [0,0] and end at [1,1.5], and to flip the trials where the left hand side response was chosen the other way around for comparison. Let's do that now.
Step7: Our next problem is that all of our trials last for different amounts of time.
Step8: We can deal with this in one of two ways, both of which I'll demonstrate.
Step9: An alternative approach is to keep the actual timestamp for each sample, so you can analyse the development of the trajectories in real time. To do this, you need to "extend" the data for all of the trials so that they all last for the same amount of time. In this example, we'll extend every trial to 5 seconds (5000 milliseconds).
Step10: With all of this done, you're ready to calculate the statistics you'll be using in your analyses. Again, don't worry too much about the syntax here.
Step11: Finally, we'll save our processed data. First, we split of our processed mouse trajectory columns into seperate data structures, which I'll explore a little more below.
Step12: With that done, we can delete this information from our main data frame, so that it's compact enough to use easily in your data analysis package of choice, before finally saving everything as csv files.
|
<ASSISTANT_TASK:>
Python Code:
# For reading data files
import os
import glob
import numpy as np # Numeric calculation
import pandas as pd # General purpose data analysis library
import squeak # For mouse data
# For plotting
import matplotlib.pyplot as plt
%matplotlib inline
# Prettier default settings for plots (optional)
import seaborn
seaborn.set_style('darkgrid')
from pylab import rcParams
rcParams['figure.figsize'] = 8, 5
results = []
for datafile in glob.glob('data/*.csv'):
this_data = pd.read_csv(datafile)
results.append(this_data)
data = pd.concat(results)
data = pd.concat(
[pd.DataFrame(pd.read_csv(datafile))
for datafile in glob.glob('data/*.csv')])
print data.head()
data['t'] = data.tTrajectory.map(squeak.list_from_string)
data['x'] = data.xTrajectory.map(squeak.list_from_string)
data['y'] = data.yTrajectory.map(squeak.list_from_string)
for i in range(len(data)):
x = data.x.iloc[i]
y = data.y.iloc[i]
plt.plot(x, y, color='blue', alpha=.5) # alpha controlls the transparency
plt.show()
data['y'] = data.y * -1 # Reverse y axis
data['x'] = data.x.map(squeak.remap_right) # Flip the leftward responses
data['x'] = data.x.map(squeak.normalize_space)
data['y'] = data.y.map(squeak.normalize_space) * 1.5
for i in range(len(data)):
x = data.x.iloc[i]
y = data.y.iloc[i]
plt.plot(x, y, color='blue', alpha=.5)
plt.text(0, 0, 'START', horizontalalignment='center')
plt.text(1, 1.5, 'END', horizontalalignment='center')
plt.show()
for i in range(len(data)):
x = data.x.iloc[i]
t = data.t.iloc[i]
plt.plot(t, x, color='blue', alpha=.3)
plt.xlabel('Time (msec)')
plt.ylabel('x axis position')
plt.show()
data['nx'], data['ny'] = zip(*[squeak.even_time_steps(x, y, t) for x, y, t in zip(data.x, data.y, data.t)])
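# The zip(*...) idiom above transposes the per-trial (x, y) result pairs into
# two sequences, one assigned to the 'nx' column and one to 'ny'.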
for i, x in data.nx.iteritems():
plt.plot(x, color='blue', alpha=.3)
plt.xlabel('Normalized time step')
plt.ylabel('x axis position')
plt.show()
max_time = 5000 # Alternatively, max_time = data.rt.max()
data['rx'] = [squeak.uniform_time(x, t, max_duration=5000) for x, t in zip(data.x, data.t)]
data['ry'] = [squeak.uniform_time(y, t, max_duration=5000) for y, t in zip(data.y, data.t)]
for i in range(len(data)):
x = data.rx.iloc[i]
plt.plot(x.index, x, color='blue', alpha=.3)
plt.xlabel('Time (msec)')
plt.ylabel('x axis position')
plt.show()
# Mouse Stats
data['md'] = data.apply(lambda trial: squeak.max_deviation(trial['nx'], trial['ny']), axis=1)
data['auc'] = data.apply(lambda trial: squeak.auc(trial['nx'], trial['ny']), axis=1)
data['xflips'] = data.nx.map(squeak.count_x_flips)
data['init_time'] = data.ry.map(lambda y: y.index[np.where(y > .05)][0])
# Taking a look at condition means
print data.groupby('condition')['md', 'auc', 'xflips', 'init_time', 'rt'].mean()
nx = pd.concat(list(data.nx), axis=1).T
ny = pd.concat(list(data.ny), axis=1).T
rx = pd.concat(list(data.rx), axis=1).T
ry = pd.concat(list(data.ry), axis=1).T
redundant = ['xTrajectory', 'yTrajectory', 'tTrajectory',
'x', 'y', 't', 'nx', 'ny', 'rx', 'ry']
data = data.drop(redundant, axis=1)
data.head()
# Save data
data.to_csv('processed.csv', index=False)
nx.to_csv('nx.csv', index=False)
ny.to_csv('ny.csv', index=False)
rx.to_csv('rx.csv', index=False)
ry.to_csv('ry.csv', index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The variable data contains the returned HTML code (the full page) as a string. You can process it, save it, or do anything else you need.
Step2: Get JSON data from an API
Step3: The Requests method json() converts the JSON response to a Python dictionary. The next code block demonstrates how to get data from the obtained response.
Step4: Compare the output of the code above with the example below.
Step5: Custom headers
Step6: The request headers can be modified in a simple way, as follows.
|
<ASSISTANT_TASK:>
Python Code:
from urllib.request import urlopen
r = urlopen('http://www.python.org/')
data = r.read()
print("Status code:", r.getcode())
import requests
r = requests.get("http://www.python.org/")
data = r.text
print("Status code:", r.status_code)
import requests
r = requests.get("http://api.open-notify.org/iss-now.json")
obj = r.json()
print(obj)
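# Accessing fields of the decoded dictionary (key names follow the Open
# Notify iss-now schema):
position = obj["iss_position"]
print("Latitude:", position["latitude"])
print("Longitude:", position["longitude"])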
s = requests.Session()
print("No cookies on start: ")
print(dict(s.cookies))
r = s.get('http://google.cz/')
print("\nA cookie from google: ")
print(dict(s.cookies))
r = s.get('http://google.cz/?q=cat')
print("\nThe cookie is perstent:")
print(dict(s.cookies))
r = requests.get('http://google.cz/')
print("\nA cookie from google: ")
print(dict(r.cookies))
r = requests.get('http://google.cz/?q=cat')
print("\nDifferent cookie:")
print(dict(r.cookies))
r = requests.get("http://www.python.org/")
print(r.headers)
headers = {
"Accept": "text/plain",
}
r = requests.get("http://www.python.org/", headers=headers)
print(r.status_code)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Principle
Step2: If $a=47$, we look for $a',k$ such that $aa' - nk=1$.
Step3: Notes on the inverse of a
Step4: We only consider the raw decision function, since it can be computed from additions and multiplications alone. For what follows, we will need a model that works on variables normalized with MinMaxScaler. We also remove the bias and replace it with a constant column.
Step5: Exercise 3
Step6: Exercise 4
Step7: Notes
Step8: Same distribution in a different order. To change this distribution, we add a small amount of noise that is negligible for the numerical variable in question but that will be encrypted in a completely different way.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from jyquickhelper import add_notebook_menu
add_notebook_menu()
def compose(x, a, n):
return (a * x) % n
def crypt(x):
return compose(x, 577, 10000)
crypt(5), crypt(6)
crypt(5+6), (crypt(5) + crypt(6)) % 10000
crypt(6-5), (crypt(6) - crypt(5)) % 10000
crypt(5-6), (crypt(5) - crypt(6)) % 10000
n = 10000
for k in range(2, n):
if (577*k) % n == 1:
ap = k
break
ap
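# On Python 3.8+ the same modular inverse can be computed directly (this
# assumes gcd(577, 10000) == 1):
print(pow(577, -1, 10000))  # expected: 2513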
def decrypt(x):
return compose(x, 2513, 10000)
decrypt(crypt(5)), decrypt(crypt(6))
decrypt(crypt(5)*67), decrypt(crypt(5*67))
from sklearn.datasets import load_diabetes
data = load_diabetes()
X = data.data
Y = data.target
from sklearn.linear_model import LinearRegression
clr = LinearRegression()
clr.fit(X, Y)
clr.predict(X[:1]), Y[0]
from sklearn.metrics import r2_score
r2_score(Y, clr.predict(X))
from sklearn.preprocessing import MinMaxScaler
import numpy
X_norm = numpy.hstack([MinMaxScaler((0, 100)).fit_transform(X),
numpy.ones((X.shape[0], 1))])
Y_norm = MinMaxScaler((0, 100)).fit_transform(Y.reshape(len(Y), 1)).ravel()
Y_norm.min(), Y_norm.max()
clr_norm = LinearRegression(fit_intercept=False)
clr_norm.fit(X_norm, Y_norm)
clr_norm.predict(X_norm[:1]), Y_norm[0]
from sklearn.metrics import r2_score
r2_score(Y_norm, clr_norm.predict(X_norm))
def decision_linreg(xs, coef, bias):
s = bias
xs = xs.copy().ravel()
coef = coef.copy().ravel()
if xs.shape != coef.shape:
raise ValueError("Not the same dimension {0}!={1}".format(xs.shape, coef.shape))
for x, c in zip(xs, coef):
s += c * x
return s
list(X[0])[:5]
clr.predict(X[:1]), decision_linreg(X[:1], clr.coef_, clr.intercept_)
clr_norm.predict(X_norm[:1]), decision_linreg(X_norm[:1], clr_norm.coef_, clr_norm.intercept_)
coef_int = [int(i) for i in clr_norm.coef_ * 100]
coef_int
inter_int = int(clr_norm.intercept_ * 10000)
inter_int
import numpy
def decision_linreg_int(xs, coef):
s = 0
for x, c in zip(xs, coef):
s += c * x
return s % 10000
def decision_crypt_decrypt_linreg(xs, coef_int):
    # Encrypt the inputs
int_xs = [int(x) for x in xs.ravel()]
crypt_xs = [crypt(i) for i in int_xs]
    # Apply the prediction.
pred = decision_linreg_int(crypt_xs, coef_int)
    # Decrypt.
dec = decrypt(pred % 10000)
return dec / 100
(decision_linreg(X_norm[:1], clr_norm.coef_, clr_norm.intercept_),
decision_crypt_decrypt_linreg(X_norm[0], coef_int))
p1s = []
p2s = []
for i in range(0, X_norm.shape[0]):
p1 = decision_linreg(X_norm[i:i+1], clr_norm.coef_, clr_norm.intercept_)
p2 = decision_crypt_decrypt_linreg(X_norm[i], coef_int)
if i < 5:
print(i, p1, p2)
p1s.append(p1)
p2s.append(p2)
import matplotlib.pyplot as plt
plt.plot(p1s, p2s, '.')
from numpy.random import poisson
X = poisson(size=10000)
mx = X.max()+1
X.min(), mx
from matplotlib import pyplot as plt
plt.hist(X, bins=mx, rwidth=0.9);
def crypt(x):
return compose(x, 5794, 10000)
import numpy
Xcrypt = numpy.array([crypt(x) for x in X])
Xcrypt[:10]
plt.hist(Xcrypt, bins=mx, rwidth=0.9);
import random
Xbruit = numpy.array([100*x + random.randint(0,100) for x in X])
Xbruit[:10]
fix, ax = plt.subplots(1, 2, figsize=(12,4))
ax[0].hist(Xbruit, bins=mx, rwidth=0.9)
ax[1].hist(Xbruit, bins=mx*100);
Xbruitcrypt = numpy.array([crypt(x) for x in Xbruit])
fix, ax = plt.subplots(1, 2, figsize=(12,4))
ax[0].hist(Xbruitcrypt, bins=mx, rwidth=0.9)
ax[1].hist(Xbruitcrypt, bins=mx*100);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Data validation
Step3: Exploratory Data Analysis
Step4: SegmentStatus dataframe
Step5: It's likely that there is a sort of "snake effect" in the data, i.e. the traffic flows and there aren't jams at the same timestamp in all the segments of the road. Because of this, I will introduce a "snake_parameter" in the RoadStatus class, so the packing_index will be the average of the first snake_parameter segments ordered by packing index, descending.
Step6: Jam dataframe
|
<ASSISTANT_TASK:>
Python Code:
def get_status(dt, category=None):
    """Returns road status given a specific datetime."""
if category:
return db.session.query(RoadStatus).filter(RoadStatus.timestamp > dt.strftime('%s')).\
filter(RoadStatus.timestamp < (dt+timedelta(0,60)).strftime('%s')).\
filter(RoadStatus.category == category).all()
else:
return db.session.query(RoadStatus).filter(RoadStatus.timestamp > dt.strftime('%s')).\
filter(RoadStatus.timestamp < (dt+timedelta(0,60)).strftime('%s')).all()
def get_segments(dt, category=None):
    """Prints segment statuses given a specific datetime."""
for status in db.session.query(SegmentStatus).filter(SegmentStatus.timestamp > dt.strftime('%s')).\
filter(SegmentStatus.timestamp < (dt+timedelta(0,60)).strftime('%s')):
if status.segment.category == category or category == None:
print status.segment.category, status.packing_index
def printall(dt, category=None):
print get_status(dt, category)
get_segments(dt, category)
printall(datetime(2016,8,16,18,40)) #accident
printall(datetime(2016,8,16,10,43), category='Arrive')
printall(datetime(2016,8,15,19,39), category='Leave')
printall(datetime(2016,8,12,19,20), category='Leave')
printall(datetime(2016,8,12,19,20), 'Leave')
printall(datetime(2016,8,13,21,15), 'Leave') #no traffic
printall(datetime(2016,8,13,9,47), 'Arrive') #slow down
printall(datetime(2016,8,11,12,38), 'Arrive') #no traffic
printall(datetime(2016,8,11,11,20), 'Arrive')
printall(datetime(2016,8,9,8,35), 'Arrive')
printall(datetime(2016,8,7,20,0)) #serious accident
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
qs = db.session.query(SegmentStatus).join(Segment).\
filter(SegmentStatus.timestamp > datetime(2016,8,4,0,0).strftime('%s')).\
filter(SegmentStatus.timestamp < datetime(2016,8,16,0,0).strftime('%s'))
ds = pd.read_sql(qs.statement, qs.session.bind)
ds.set_index('id', inplace=True)
ds['timestamp'] = pd.to_datetime(ds['timestamp'],unit='s')
ds.head()
#Let's check how many segments can have a packing index <> 0 in the same timestamp/category
ds = ds[(ds['packing_index'] > 0)]
ds1 = pd.DataFrame(ds['packing_index'].groupby([ds.road_status_id]).count())
ds1 = pd.DataFrame(ds1['packing_index'].groupby([ds1.packing_index]).count())
ds1.head()
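# Hedged sketch of the proposed "snake_parameter" aggregation (column names
# reused from ds; sort_values requires pandas >= 0.17): average the top-k
# segment packing indexes per road status.
snake_parameter = 3
snake_idx = (ds.sort_values('packing_index', ascending=False)
               .groupby('road_status_id').head(snake_parameter)
               .groupby('road_status_id')['packing_index'].mean())
snake_idx.head()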
qr = db.session.query(RoadStatus).filter(RoadStatus.timestamp > datetime(2016,8,4,0,0).strftime('%s')).\
filter(RoadStatus.timestamp < datetime(2016,8,16,0,0).strftime('%s'))
dr = pd.read_sql(qr.statement, qr.session.bind)
dr.set_index('id', inplace=True)
dr['timestamp'] = pd.to_datetime(dr['timestamp'],unit='s')
dr.sort([('packing_index')], ascending=False).head()
dr.dtypes
dr.describe()
plt.figure(figsize=(18,5))
plt.subplot(1, 3, 1)
plt.xlabel('packing index')
plt.boxplot(dr[dr.packing_index>0].packing_index.reset_index()['packing_index'], showmeans=True, showfliers=True)
plt.show()
time = pd.DatetimeIndex(dr.timestamp)
dr_plt = dr.groupby([time.hour]).mean()
dr_plt.reset_index(inplace=True)
fig = plt.figure(figsize=(15,5))
ax = plt.gca()
dr_plt.plot(x='index', y='packing_index', ax=ax)
plt.title("Hourly average packing_index")
plt.ylabel('Packing index')
plt.xlabel('Hour')
ax.set_xticks(range(23))
plt.show()
time = pd.DatetimeIndex(dr.timestamp)
dr_plt = dr.groupby([time.weekday]).mean()
dr_plt.reset_index(inplace=True)
dayDict = {0:'Mon', 1:'Tue', 2:'Wed', 3:'Thu', 4:'Fri', 5:'Sat', 6:'Sun'}
def f(x):
daylabel = dayDict[x]
return daylabel
dr_plt['daylabel'] = dr_plt['index'].apply(f)
fig = plt.figure(figsize=(15,5))
ax = plt.gca()
dr_plt.plot(x='daylabel', y='packing_index', ax=ax)
plt.title("average packing_index per weekday")
plt.ylabel('Packing index')
plt.xlabel('Day')
ax.set_xticks(range(6))
plt.show()
time = pd.DatetimeIndex(dr.timestamp)
dr_plt = dr.groupby([time.day]).mean()
dr_plt.reset_index(inplace=True)
fig = plt.figure(figsize=(15,5))
ax = plt.gca()
dr_plt.plot(x='index', y='packing_index', ax=ax)
plt.title("average packing_index per day")
plt.ylabel('Packing index')
plt.xlabel('Day (August 2016)')
ax.set_xticks(range(18))
plt.show()
qj = db.session.query(Jam).filter(Jam.timestamp > datetime(2016,8,4,0,0).strftime('%s')).\
filter(Jam.timestamp < datetime(2016,8,16,0,0).strftime('%s'))
dj = pd.read_sql(qj.statement, qj.session.bind)
dj.set_index('id', inplace=True)
dj['timestamp'] = pd.to_datetime(dj['timestamp'],unit='s')
dj.head()
time = pd.DatetimeIndex(dj.timestamp)
dj['day']=time.day
dj['hour']=time.hour
dj_time = dj.groupby([dj.day, dj.hour, dj.startLongitude, dj.endLongitude, dj.startLatitude, dj.endLatitude, dj.street, dj.severity, dj.color, dj.source, dj.direction]).count()
print 'Average traffic duration: %.2f min' % dj_time['timestamp'].mean()
dj_dur = pd.DataFrame(dj_time['timestamp'])
dj_dur.reset_index(inplace=True)
dj_dur = dj_dur[['hour', 'timestamp']]
dj_dur.columns=['hour', 'duration']
dj_dur = dj_dur.groupby([dj_dur.hour]).mean()
dj_dur.reset_index(inplace=True)
dj_dur.head()
fig = plt.figure(figsize=(15,5))
ax = plt.gca()
dj_dur.plot(x='hour', y='duration', ax=ax)
plt.title("Average jam duration per hour")
plt.ylabel('Duration [min]')
plt.xlabel('Hour')
ax.set_xticks(range(23))
plt.show()
pd.scatter_matrix(dr, alpha=0.2, figsize=(18, 18), diagonal='kde')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You probably can figure out what's going on here
Step2: Ranges
Step3: You can specify an optional third step value if you want to skip over values in your range. Write some code below to display even numbers between 1 and 10
Step4: if ... elif ... else
Step5: Write some code to iterate through numlist, print positive numbers and ignore negative numbers. To write an if statement, start with the if keyword followed by the condition to be tested. Just like with for loops, you need to type a colon before you start to write your nested code.
Step6: You can include one or more elif statements to test for additional conditions when your if statement evaluates to False. Each elif statement must be nested by the same amount as the if statement. You may have only one if statement but can have as many elif statements as you like to test for multiple conditions. Python tests your if and elif conditions in order. As soon as one evaluates to True, all subsequent elif statements are ignored.
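For instance, in this small illustrative snippet (not part of the exercise), 5 satisfies both conditions, but only the first matching branch runs:

num = 5
if num >= 0:
    print(num, 'is positive')           # this branch runs
elif num > -26:
    print(num, 'is slightly negative')  # also True, but never checked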
Step7: Functions
Step8: The first thing our function needs to do is remove $ and commas from value. Since value is a string, we can use the replace method to help us. If you can't figure out what to do, type print(help(str.replace)) in any code box below and run that code to see how to use replace. If you get stuck, read below for the answer
Step9: Your code should look something like this
Step10: Now, let's test our function by passing in some strings. To call a function, just type the name of the function with the value we want to pass to it in parentheses. To call the function several times at once, put each function call inside a print statement.
|
<ASSISTANT_TASK:>
Python Code:
numlist = [1, 2, 3, 4, 5]
for item in numlist:
print(item)
print('All done!')
sublist = ['a', 'b', 'c']
for item in numlist:
for subitem in sublist:
print(item, subitem)
print('All done!')
for num in range(1, 11):
print(num)
for num in range(2, 11, 2):
print(num)
numlist = [39, -36, -33, 99, 24, 40, 22, -4, -11, -20]
for num in numlist:
if num >= 0:
print(num)
for num in numlist:
if num >= 0:
print(num, 'is positive')
elif num > -26:
print(num, 'is slightly negative')
else:
print(num, 'is very negative')
def clean_money_string(value):
pass
def clean_money_string(value):
return_value = value.replace('$', '').replace(',', '')
def clean_money_string(value):
return_value = value.replace('$', '').replace(',', '')
try:
return_value = round(float(return_value))
except:
return_value = 0
return return_value
print(clean_money_string('$1,234,567.89'))
print(clean_money_string('$1,234,567.39'))
print(clean_money_string(''))
print(clean_money_string('n/a'))
print(clean_money_string('$'))
print(clean_money_string('93,w64,092.22'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First define the $\hat{\sigma}_{ij}$ operators according to atomic levels shown in the following figure. Decay rates are also indicated.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from qutip import *
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import Image
one, two, three = three_level_basis()
sig11 = one * one.dag()
sig22 = two * two.dag()
sig33 = three * three.dag()
sig13 = one * three.dag()
sig23 = two * three.dag()
sig12 = one * two.dag()
Image("fig/EIT_levels.png")
# Loop over the detuning Delta and generate optical response
# constants based on Jenkins EIT masters thesis
# It seems like the values they quote aren't right. They don't match the code.
# Decay rates
Gamma = 0.1 # decay rate on probe transition
gamma = 5.0 # decay rate on control transition
gamma_c = 0.05 # coherence decay rate (2->1)
phi = 0 # control field detuning
# Rabi frequencies for pump and control beam
omegaP = 0.1*gamma # pump
omegaC = 1.0*gamma # control
# Collapse operators
c1 = np.sqrt(Gamma)*sig13 # 1-3 coherence decay (probe transition)
c2 = np.sqrt(gamma)*sig23 # 2-3 coherence decay (control transition)
c3 = np.sqrt(gamma_c)*sig12 # ground state coherence decay
collapse = [c1,c2,c3]
deltalist = np.linspace(-20,20,301)
rho_33 = []
rho_31 = []
rho_13 = []
chi = []
for Delta in deltalist:
# Define Hamiltonian for detuning Delta:
H = (omegaP*(sig13+sig13.dag()) + omegaC*(sig23+sig23.dag()) + phi*(sig33 - sig22) + Delta*(sig33 - sig11))
# find steady-state density operator:
rho_ss = steadystate(H,collapse)
# save values for plotting later:
rho_33.append(rho_ss[2,2]) # save the exited state population
rho_31.append(rho_ss[2,0]) # save the ground-excited coherence
rho_13.append(rho_ss[0,2])
# this approach is ugly because it relies on the internal matrix layout, better to use projection ops:
chi.append(expect(sig13.dag(),rho_ss)) # use sig31 ( which is sig13.dag() )
    # This is not an equality, but the susceptibility is proportional to sig31
fsize = {'fontsize':'16'}
plt.plot(deltalist/gamma,np.real(rho_33))
plt.title("Excited state population")
plt.xlabel("$\Delta/\gamma$",**fsize)
plt.ylabel("$\hat{\\rho}_{33}$",**fsize)
fsize = {'fontsize':'16'}
plt.plot(deltalist/gamma,np.real(rho_33))
plt.plot(deltalist/gamma,np.imag(rho_13))
plt.title("Excited state population")
plt.xlabel("$\Delta/\gamma$",**fsize)
#plt.ylabel("$\hat{\\rho}_{31}$",**fsize)
# This is probably the simplest official way to do it.
plt.plot(deltalist,np.real(chi),label="Refractive Index")
plt.plot(deltalist,np.imag(chi),label="Absorption")
plt.legend()
#plt.title("Nonlinear response")
plt.xlabel("$\Delta$")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: -15" of CC_Y went to +15" of CC_X; +10" of CC_X went to -10" of CC_Y
|
<ASSISTANT_TASK:>
Python Code:
# Imports used throughout this notebook (the cwfs / mmtwfs module paths below
# are assumed from those packages' examples; adjust if your install differs):
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from lsst.cwfs.instrument import Instrument
from lsst.cwfs.algorithm import Algorithm
from lsst.cwfs.image import Image, readFile
import lsst.cwfs.plots as plots
from mmtwfs.telescope import MMT
from mmtwfs.zernike import ZernikeVector
from mmtwfs.wfs import center_pupil

p1000_set1_files = [
"sog_ff_cal_img_2017.1207.083225.fits",
"sog_ff_cal_img_2017.1207.083329.fits",
"sog_ff_cal_img_2017.1207.083411.fits"
]
m1000_set1_files = [
"sog_ff_cal_img_2017.1207.083508.fits",
"sog_ff_cal_img_2017.1207.083545.fits",
"sog_ff_cal_img_2017.1207.083625.fits"
]
m1000_set2_files = [
"sog_ff_cal_img_2017.1207.083737.fits",
"sog_ff_cal_img_2017.1207.083813.fits",
"sog_ff_cal_img_2017.1207.083848.fits"
]
p1000_set2_files = [
"sog_ff_cal_img_2017.1207.083948.fits",
"sog_ff_cal_img_2017.1207.084024.fits",
"sog_ff_cal_img_2017.1207.084059.fits"
]
p1000_set1_data = []
for f in p1000_set1_files:
hdu = fits.open(f)
data = hdu[1].data
header = hdu[1].header
p1000_set1_data.append(data)
p1000_set1_data = np.array(p1000_set1_data)
p1000_set2_data = []
for f in p1000_set2_files:
hdu = fits.open(f)
data = hdu[1].data
header = hdu[1].header
p1000_set2_data.append(data)
p1000_set2_data = np.array(p1000_set2_data)
m1000_set1_data = []
for f in m1000_set1_files:
hdu = fits.open(f)
data = hdu[1].data
header = hdu[1].header
m1000_set1_data.append(data)
m1000_set1_data = np.array(m1000_set1_data)
m1000_set2_data = []
for f in m1000_set2_files:
hdu = fits.open(f)
data = hdu[1].data
header = hdu[1].header
m1000_set2_data.append(data)
m1000_set2_data = np.array(m1000_set2_data)
plt.imshow(p1000_set2_data[0][253-128:253+128, 296-128:296+128], origin="lower")
plt.show()
size = 90
inner = 15
y, x = np.ogrid[-size:size, -size:size]
mask_in = x*x + y*y <= inner*inner
mask_out = x*x + y*y >= size*size
p1000_set1_med = np.median(p1000_set1_data, axis=0)[260-size:260+size,289-size:289+size]
p1000_set2_med = np.median(p1000_set2_data, axis=0)[260-size:260+size,289-size:289+size]
m1000_set1_med = np.median(m1000_set1_data, axis=0)[260-size:260+size,289-size:289+size]
m1000_set2_med = np.median(m1000_set2_data, axis=0)[260-size:260+size,289-size:289+size]
for im in [p1000_set1_med, p1000_set2_med, m1000_set1_med, m1000_set2_med]:
im -= np.median(im[:,:10])
im[im < 0] = 0.
# im[mask_in] = 0.
    # im[mask_out] = 0.
plt.imshow(p1000_set1_med - m1000_set1_med, origin="lower")
plt.show()
p1000_set2_med.shape
set1 = p1000_set1_med - m1000_set1_med
set2 = p1000_set2_med - m1000_set2_med
fits.writeto("p1000_s1.fits", p1000_set1_med, overwrite=True)
fits.writeto("p1000_s2.fits", p1000_set2_med, overwrite=True)
fits.writeto("m1000_s1.fits", m1000_set1_med, overwrite=True)
fits.writeto("m1000_s2.fits", m1000_set2_med, overwrite=True)
fieldXY = [0., 0.]
I1 = Image(readFile("p1000_s2.fits"), fieldXY, Image.INTRA)
I2 = Image(readFile("m1000_s2.fits"), fieldXY, Image.EXTRA)
d = fits.open("p1000_s1.fits")[-1].data
Image(d, fieldXY, Image.INTRA)
fig, ax = plt.subplots()
im = ax.imshow(I1.image, origin='lower', cmap='RdBu')
cbar = fig.colorbar(im)
fig.show()
t = MMT()
pup_mask = t.pupil_mask(size=125)
x, y, cfig = center_pupil(I1.image, pup_mask)
print(int(np.round(x)), int(np.round(y)))
cfig.show()
plt.imshow(pup_mask)
plt.show()
mmt = Instrument('mmto', I1.sizeinPix)
# this is a hack. 0.0 doesn't work, but this will yield annular zernike solution that is very close to circular.
mmt.obscuration = 0.01
algo = Algorithm('exp', mmt, 3)
algo.runIt(mmt, I1, I2, 'onAxis')
print(algo.zer4UpNm)
plots.plotZer(algo.zer4UpNm, 'nm')
zv = ZernikeVector()
zv.from_array(algo.zer4UpNm, modestart=4, normalized=True)
zv.denormalize()
zv
zv.fringe_bar_chart().show()
plots.plotImage(algo.Wconverge, "Final wavefront", show=False)
I1 = Image(readFile("p1000_s2.fits"), fieldXY, Image.INTRA)
I2 = Image(readFile("m1000_s2.fits"), fieldXY, Image.EXTRA)
algo2 = Algorithm('fft', mmt, 3)
algo2.itr0(mmt, I1, I2, 'onAxis')
plots.plotImage(algo2.S, 'wavefront signal')
plt.close('all')
mmt.offset
fig, ax = plt.subplots()
plt.imshow(algo.Wconverge, origin="lower")
plt.show()
ax.set_title?
im = fits.open("/Users/tim/MMT/wfsdat/20180209/sog_ff_cal_img_2018.0209.021854.fits")[-1].data
plt.imshow(im, origin='lower')
plt.show()
t = MMT()
pup_mask = t.pupil_mask(size=120)
x, y, f = center_pupil(im, pup_mask, threshold=0.8)
x, y
f.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: 10,000 bitcoin network
Step3: Post-processing the data pulled from BigQuery
Step4: Visualizing the network
Step5: We use the library networkx below to visualize a network of transactions following the pizza purchase by up to 2 degrees (As noted here, our code may return a deeper graph than the target depth if there are repeated transactions from wallet a to b or self transactions (a -> a)).
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from google.cloud import bigquery
bq = bigquery.Client()
QUERY_TEMPLATE = """
SELECT
timestamp,
inputs.input_pubkey_base58 AS input_key,
outputs.output_pubkey_base58 AS output_key,
outputs.output_satoshis as satoshis
FROM `bigquery-public-data.bitcoin_blockchain.transactions`
JOIN UNNEST (inputs) AS inputs
JOIN UNNEST (outputs) AS outputs
WHERE inputs.input_pubkey_base58 IN UNNEST({0})
AND outputs.output_satoshis >= {1}
AND inputs.input_pubkey_base58 IS NOT NULL
AND outputs.output_pubkey_base58 IS NOT NULL
GROUP BY timestamp, input_key, output_key, satoshis
"""
def trace_transactions(target_depth, seeds, min_satoshi_per_transaction):
    """Trace transactions associated with a given bitcoin key.

    To limit the number of BigQuery calls, this function ignores time.
    If you care about the order of transactions, you'll need to do post-processing.
    May return a deeper graph than the `target_depth` if there are repeated transactions
    from wallet a to b or self transactions (a -> a).
    """
MAX_SEEDS_PER_QUERY = 500
query = QUERY_TEMPLATE.format(seeds, min_satoshi_per_transaction)
#print(f'Estimated total query size: {int(bq_assist.estimate_query_size(query)) * MAX_DEPTH}')
results = []
seeds_scanned = set()
for i in range(target_depth):
seeds = seeds[:MAX_SEEDS_PER_QUERY]
print("Now scanning {} seeds".format(len(seeds)))
query = QUERY_TEMPLATE.format(seeds, min_satoshi_per_transaction)
transactions = bq.query(query).to_dataframe()
results.append(transactions)
# limit query kb by dropping any duplicated seeds
seeds_scanned.update(seeds)
seeds = list(set(transactions.output_key.unique()).difference(seeds_scanned))
return pd.concat(results).drop_duplicates()
MAX_DEPTH = 2
BASE_SEEDS = ['1XPTgDRhN8RFnzniWCddobD9iKZatrvH4']
SATOSHI_PER_BTC = 10**8
df = trace_transactions(MAX_DEPTH, BASE_SEEDS, 0)
df.size
df['date_time'] = pd.to_datetime(df.timestamp * 1000000)
df.head(3)
df.head(3).to_csv('transactions.csv')
def dig_row(row, seeds, min_satoshis, trace_from_key):
if row['satoshis'] < min_satoshis:
return None
if trace_from_key and row['input_key'] not in seeds:
return None
elif not trace_from_key and row['output_key'] not in seeds:
return None
seeds.add(row['output_key'])
return row
def single_pass_dig(initial_seeds, input_df, initial_datetime=None, min_satoshis=0, trace_from_key=True):
df = input_df.copy()
active_seeds = set(initial_seeds)
if trace_from_key and initial_datetime is not None:
df = df[df['date_time'] >= initial_datetime]
elif not(trace_from_key) and initial_datetime is not None:
df = df[df['date_time'] <= initial_datetime]
df.sort_values(by=['timestamp'], ascending=trace_from_key, inplace=True)
transactions = []
for index, row in df.iterrows():
rv = dig_row(row, active_seeds, min_satoshis, trace_from_key)
if rv is not None:
transactions.append(rv)
return pd.DataFrame(transactions)
future_transactions = single_pass_dig(BASE_SEEDS, df,
initial_datetime=pd.to_datetime("May 16, 2010"))
total_flows = future_transactions[['input_key', 'output_key', 'satoshis']].groupby(
by=['input_key', 'output_key']).sum().reset_index()
total_flows.head(3)
total_flows.info()
total_flows.head(3).to_csv('total_flows.csv')
## purchase address
hanyecz = "1XPTgDRhN8RFnzniWCddobD9iKZatrvH4"
import networkx as nx
import matplotlib.pyplot as plt
graph = nx.from_pandas_edgelist(total_flows,
source = "input_key",
target = "output_key",
create_using = nx.DiGraph())
pos = nx.kamada_kawai_layout(graph)
## color the hanyecz red and the rest of the nodes blue
node_colors = list(map(lambda x: "red" if x == hanyecz else "blue", graph.nodes))
## calculate width based on satoshis
satoshi_stats = total_flows.describe()['satoshis']
def get_width(x, stats):
if x < stats['25%']:
return 1
elif x >= stats['25%'] and x < stats['50%']:
return 2
elif x >= stats['50%'] and x < stats['75%']:
return 3
else:
return 4
edge_widths = total_flows['satoshis'].apply(lambda x: get_width(x, satoshi_stats))
f = plt.figure()
nx.draw_networkx(graph, pos,
with_labels = False,
alpha = 0.5,
node_size = 150,
node_color = node_colors,
width = edge_widths / 4,
arrowsize = 20,
ax=f.add_subplot(111))
plt.title("BTC inputs upstream of pizza purchase address (red)")
plt.axis('off')
f.savefig("graph.png")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Feature Transformation
Step2: The explained variance is high for the first two dimensions (45.96 % and 40.52 %, respectively), but drops significantly beginning with the third dimension (7.00 % for the third, 4.40 % for the fourth dimension). Thus, the first two components already explain 86.5 % of the variation in the data.
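As a quick numerical check (an illustrative snippet reusing the pca object fitted in the code below):

cum_var = np.cumsum(pca.explained_variance_ratio_)
print(cum_var[1])  # ~0.865, i.e. the first two components together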
Step3: The first dimension seems to basically represent only the 'fresh'-feature, as this feature has a strong negative projection on the first dimension. The other features have rather weak (mostly negative) projections on the first dimension. That is, the first dimension basically tells us whether the 'fresh'-feature value is high or low, mixed with a little bit of information from the other features.
Step4: The first vector [-0.04771087 0.00496636 0.00492989 0.00208307 -0.0059068 0.00159593] again represents mainly the 'fresh'-feature, with a coefficient of -0.0477. The other features have a rather weak projection on the first dimension.
Step5: The first cluster contains customers that have vastly (around 3 times) higher spending in the 'Fresh'-category compared to the average, indicating that those customers specialize in selling fresh products. Also, customers in this cluster tend to place many orders in the 'Frozen'- and 'Delicatessen'-categories, but relatively few in the 'Detergents and Paper'-category.
Step6: Quantifying the quality of clustering via silhouette plots
Step7: Thus our clustering with 3 centroids is good.
Step8: The dataset also contains 3 regions, which validates our assumption.
Step9: K Means
Step10: Affinity Propagation
Step11: MeanShift
Step12: Mixture of Gaussian Models
Step13:
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries: NumPy, pandas, matplotlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Tell iPython to include plots inline in the notebook
%matplotlib inline
# read .csv from provided dataset
csv_filename="Wholesale customers data.csv"
# df=pd.read_csv(csv_filename,index_col=0)
df=pd.read_csv(csv_filename)
df.head()
features = df.columns[2:]
features
data = df[features]
print(data.head(5))
# Apply PCA with the same number of dimensions as variables in the dataset
from sklearn.decomposition import PCA
pca = PCA(n_components=6) # 6 components for 6 variables
pca.fit(data)
# Print the components and the amount of variance in the data contained in each dimension
print(pca.components_)
print(pca.explained_variance_ratio_)
plt.plot(list(pca.explained_variance_ratio_),'-o')
plt.title('Explained variance ratio as function of PCA components')
plt.ylabel('Explained variance ratio')
plt.xlabel('Component')
plt.show()
# Fit an ICA model to the data
# Note: Adjust the data to have center at the origin first!
def center_data(data, rescale = 0):
centeredData = data.copy()
for col in centeredData.columns:
centeredData[col] = (centeredData[col] - np.mean(centeredData[col]))/ (1 + rescale * np.std(centeredData[col]))
return centeredData
from sklearn.decomposition import FastICA
#data_centered = center_data(data)
ica = FastICA(n_components=6, whiten=True)
ica.fit(center_data(data,0))
# Print the independent components
print(ica.components_)
# Print the independent components (rescaled again)
print('Independent components scaled with mean')
print(np.multiply(ica.components_,list(np.mean(data))))
# Import clustering modules
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
# First we reduce the data to two dimensions using PCA to capture variation
pca = PCA(n_components=2)
reduced_data = pca.fit_transform(data)
print(reduced_data[:10]) # print upto 10 elements
# Implement your clustering algorithm here, and fit it to the reduced data for visualization
# The visualizer below assumes your clustering object is named 'clusters'
# TRIED OUT 2,3,4,5,6 CLUSTERS AND CONCLUDED THAT 3 CLUSTERS ARE A SENSIBLE CHOICE BASED ON VISUAL INSPECTION, SINCE
# WE OBTAIN ONE CENTRAL CLUSTER AND TWO CLUSTERS THAT SPREAD FAR OUT IN TWO DIRECTIONS.
kmeans = KMeans(n_clusters=3)
clusters = kmeans.fit(reduced_data)
print(clusters)
# Plot the decision boundary by building a mesh grid to populate a graph.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
hx = (x_max-x_min)/1000.
hy = (y_max-y_min)/1000.
xx, yy = np.meshgrid(np.arange(x_min, x_max, hx), np.arange(y_min, y_max, hy))
# Obtain labels for each point in mesh. Use last trained model.
Z = clusters.predict(np.c_[xx.ravel(), yy.ravel()])
# Find the centroids for KMeans or the cluster means for GMM
centroids = kmeans.cluster_centers_
print('*** K MEANS CENTROIDS ***')
print(centroids)
# TRANSFORM DATA BACK TO ORIGINAL SPACE FOR ANSWERING 7
print('*** CENTROIDS TRANSFERED TO ORIGINAL SPACE ***')
print(pca.inverse_transform(centroids))
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('Clustering on the wholesale grocery dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
X = df[features]
y = df['Region']
distortions = []
for i in range(1, 11):
km = KMeans(n_clusters=i,
init='k-means++',
n_init=10,
max_iter=300,
random_state=0)
km.fit(X)
    distortions.append(km.inertia_)
plt.plot(range(1,11), distortions, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.tight_layout()
#plt.savefig('./figures/elbow.png', dpi=300)
plt.show()
import numpy as np
from matplotlib import cm
from sklearn.metrics import silhouette_samples
km = KMeans(n_clusters=3,
init='k-means++',
n_init=10,
max_iter=300,
tol=1e-04,
random_state=0)
y_km = km.fit_predict(X)
cluster_labels = np.unique(y_km)
n_clusters = cluster_labels.shape[0]
silhouette_vals = silhouette_samples(X, y_km, metric='euclidean')
y_ax_lower, y_ax_upper = 0, 0
yticks = []
for i, c in enumerate(cluster_labels):
c_silhouette_vals = silhouette_vals[y_km == c]
c_silhouette_vals.sort()
y_ax_upper += len(c_silhouette_vals)
color = cm.jet(i / n_clusters)
plt.barh(range(y_ax_lower, y_ax_upper), c_silhouette_vals, height=1.0,
edgecolor='none', color=color)
yticks.append((y_ax_lower + y_ax_upper) / 2)
y_ax_lower += len(c_silhouette_vals)
silhouette_avg = np.mean(silhouette_vals)
plt.axvline(silhouette_avg, color="red", linestyle="--")
plt.yticks(yticks, cluster_labels + 1)
plt.ylabel('Cluster')
plt.xlabel('Silhouette coefficient')
plt.tight_layout()
# plt.savefig('./figures/silhouette.png', dpi=300)
plt.show()
y.unique()
from sklearn.cluster import AgglomerativeClustering
ac = AgglomerativeClustering(n_clusters=3, affinity='euclidean', linkage='complete')
labels = ac.fit_predict(X)
print('Cluster labels: %s' % labels)
from sklearn.model_selection import train_test_split
X = df[features]
y = df['Region']
X_train, X_test, y_train, y_test = train_test_split(X, y ,test_size=0.25, random_state=42)
from sklearn import cluster
clf = cluster.KMeans(init='k-means++', n_clusters=3, random_state=5)
clf.fit(X_train)
print (clf.labels_.shape)
print (clf.labels_)
# Predict clusters on testing data
y_pred = clf.predict(X_test)
from sklearn import metrics
print ("Addjusted rand score:{:.2}".format(metrics.adjusted_rand_score(y_test, y_pred)))
print ("Homogeneity score:{:.2} ".format(metrics.homogeneity_score(y_test, y_pred)) )
print ("Completeness score: {:.2} ".format(metrics.completeness_score(y_test, y_pred)))
print ("Confusion matrix")
print (metrics.confusion_matrix(y_test, y_pred))
# Affinity propagation
aff = cluster.AffinityPropagation()
aff.fit(X_train)
print (aff.cluster_centers_indices_.shape)
y_pred = aff.predict(X_test)
from sklearn import metrics
print ("Addjusted rand score:{:.2}".format(metrics.adjusted_rand_score(y_test, y_pred)))
print ("Homogeneity score:{:.2} ".format(metrics.homogeneity_score(y_test, y_pred)) )
print ("Completeness score: {:.2} ".format(metrics.completeness_score(y_test, y_pred)))
print ("Confusion matrix")
print (metrics.confusion_matrix(y_test, y_pred))
ms = cluster.MeanShift()
ms.fit(X_train)
print (ms.cluster_centers_)
y_pred = ms.predict(X_test)
from sklearn import metrics
print ("Addjusted rand score:{:.2}".format(metrics.adjusted_rand_score(y_test, y_pred)))
print ("Homogeneity score:{:.2} ".format(metrics.homogeneity_score(y_test, y_pred)) )
print ("Completeness score: {:.2} ".format(metrics.completeness_score(y_test, y_pred)))
print ("Confusion matrix")
print (metrics.confusion_matrix(y_test, y_pred))
from sklearn import mixture
# Define a heldout dataset to estimate covariance type
X_train_heldout, X_test_heldout, y_train_heldout, y_test_heldout = train_test_split(
X_train, y_train,test_size=0.25, random_state=42)
for covariance_type in ['spherical','tied','diag','full']:
    gm = mixture.GaussianMixture(n_components=3, covariance_type=covariance_type, random_state=42, n_init=5)
gm.fit(X_train_heldout)
y_pred=gm.predict(X_test_heldout)
print ("Adjusted rand score for covariance={}:{:.2}".format(covariance_type,
metrics.adjusted_rand_score(y_test_heldout, y_pred)))
gm = mixture.GaussianMixture(n_components=3, covariance_type='tied', random_state=42)
gm.fit(X_train)
# Print train clustering and confusion matrix
y_pred = gm.predict(X_test)
print ("Addjusted rand score:{:.2}".format(metrics.adjusted_rand_score(y_test, y_pred)))
print ("Homogeneity score:{:.2} ".format(metrics.homogeneity_score(y_test, y_pred)) )
print ("Completeness score: {:.2} ".format(metrics.completeness_score(y_test, y_pred)))
print ("Confusion matrix")
print (metrics.confusion_matrix(y_test, y_pred))
pl=plt
from sklearn import decomposition
# In this case the seeding of the centers is deterministic,
# hence we run the kmeans algorithm only once with n_init=1
pca = decomposition.PCA(n_components=2).fit(X_train)
reduced_X_train = pca.transform(X_train)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .01 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_X_train[:, 0].min() - 1, reduced_X_train[:, 0].max() + 1
y_min, y_max = reduced_X_train[:, 1].min() - 1, reduced_X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
gm.fit(reduced_X_train)
#print np.c_[xx.ravel(),yy.ravel()]
Z = gm.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure(1)
pl.clf()
pl.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=pl.cm.Paired,
aspect='auto', origin='lower')
#print reduced_X_train.shape
pl.plot(reduced_X_train[:, 0], reduced_X_train[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = gm.means_
pl.scatter(centroids[:, 0], centroids[:, 1],
marker='.', s=169, linewidths=3,
color='w', zorder=10)
pl.title('Mixture of gaussian models on the seeds dataset (PCA-reduced data)\n'
'Means are marked with white dots')
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.xticks(())
pl.yticks(())
pl.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Lecture 23
Step4: Correcting the Ideal Solution for Local Chemical Effects
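In symbols, the model implemented by the regularSolution function below is

$$G_{soln} = \Omega X_A X_B + X_A G_A + X_B G_B + RT\,(X_A \ln X_A + X_B \ln X_B),$$

with $X_A = 1 - X_B$ and $\Omega$ the regular-solution interaction parameter.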
Step6: A Small Simplification
Step8: Beyond the Bulk
Step9: The Free Energy of Our System
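Concretely, the functional coded as functionalForm in the SymPy cell below is

$$F[\phi] = \int \left[ W\,\phi^2 (1-\phi)^2 + \epsilon \left(\frac{d\phi}{dx}\right)^2 \right] dx,$$

whose Euler-Lagrange equation is derived next.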
Step10: Solving the ODE Explicitly
Step12: and after some exciting manipulation, a solution is
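namely the kink profile implemented in phiSolution below:

$$\phi(x) = \frac{1}{2}\left(1 + \tanh\left(\sqrt{\frac{W}{\epsilon}}\,\frac{x}{2}\right)\right)$$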
Step13: Solving the PDE Using Relaxation
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import interact, fixed
def idealSolution(GA, GB, XB, temperature):
    """Computes the free energy of solution for an ideal binary mixture.

    Parameters
    ----------
    GA : float
        The partial molar Gibbs free energy of pure A in Joules.
    GB : float
        The partial molar Gibbs free energy of pure B in Joules.
    XB : ndarray
        The mol fraction of component B as an array.
    temperature : float
        The temperature.

    Returns
    -------
    G : ndarray
        An array of the Gibbs free energy having the same shape as `XB`.

    Examples
    --------
    >>> XB = np.linspace(0.01,0.99,5)
    >>> idealSolution(1.0,1.0,XB,1.0)
    array([ 0.53440324, -3.72037187, -4.76282566, -3.72037187,  0.53440324])
    """
return (1.0-XB)*GA+XB*GB+8.314*temperature*((1-XB)*np.log(1-XB)+XB*np.log(XB))
def myfig(temperature):
    """This function produces a plot of the Gibbs free energy of mixing for an ideal solution."""
GA = 1.0
GB = 500.0
XB = np.linspace(0.01,0.99,50)
temperatureSpace = np.linspace(1.0,100.0,10)
y = idealSolution(GA,GB,XB,temperature)
greySolutionLines = [idealSolution(GA,GB,XB,greyT) for greyT in temperatureSpace]
fig, axes = plt.subplots(figsize=(10,8))
for greyLine in greySolutionLines:
axes.plot(XB, greyLine, 'black', alpha=0.9)
axes.plot(XB, y, 'r', label=r"$G_A X_A + G_B X_B + RT(X_A \ln X_A + X_B \ln X_B)$", linewidth=4)
axes.legend()
axes.grid(True, linestyle='dotted')
axes.set_ylabel(r"$G_{soln}$")
axes.set_xlabel(r"$X_B$")
# Location for annotations can always be done by extents instead of absolute values.
axes.annotate(r'$G_A$='+str(GA)+'\n'+r'$G_B$='+str(GB),xy=(0,200), size='large')
plt.show()
return
interact(myfig, temperature=(1.0,100.0,1.0));
def regularSolution(GA, GB, XB, omega, temperature):
return omega*(1.0-XB)*XB+(1.0-XB)*GA+XB*GB+8.314*temperature*((1.0-XB)*np.log(1.0-XB)+XB*np.log(XB))
def myfig2(omega, temperature):
    """This function produces a plot of the Gibbs free energy of mixing for a regular solution."""
GA = 1.0
GB = 1.0
XB = np.linspace(0.01,0.99,50)
temperatureSpace = np.linspace(1.0,200.0,10)
y = regularSolution(GA, GB, XB, omega, temperature)
greySolutionLines = [regularSolution(GA, GB, XB, omega, greyT) for greyT in temperatureSpace]
fig2, axes2 = plt.subplots(figsize=(14,9))
for greyLine in greySolutionLines:
axes2.plot(XB, greyLine, 'black', alpha=0.9)
axes2.plot(XB, y, 'r', label=r"$G_{soln}$", linewidth=4)
# Location for annotations can always be done by extents instead of absolute values.
axes2.annotate('GA='+str(GA)+'\n'+'GB='+str(GB),xy=(0,400), fontsize=20)
axes2.set_ylabel(r"$G_{soln}$", fontsize=15)
axes2.set_xlabel(r"$X_B$", fontsize=15)
axes2.legend(loc="upper right", fontsize=15)
axes2.xaxis.set_tick_params(labelsize=15)
axes2.yaxis.set_tick_params(labelsize=15)
plt.show()
return
interact(myfig2, omega=(0.0,5000.0,1.0), temperature=(1.0,200.0,1.0));
def regularSolution(GA, GB, XB, omega, temperature):
return omega*(1.0-XB)*XB+(1.0-XB)*GA+XB*GB+8.314*temperature*((1-XB)*np.log(1-XB)+XB*np.log(XB))
def simplifiedSolution(XB, W):
return (1.0-XB)**2*XB**2*W
def myfig3(omega, W, temperature):
    """Plot the regular-solution free energy alongside the simplified form W*phi**2*(1-phi)**2."""
GA = 1.0
GB = 1.0
XB = np.linspace(0.01,0.99,50)
temperatureSpace = np.linspace(1.0,100.0,10)
y1 = regularSolution(GA, GB, XB, omega, temperature)
greySolutionLines = [regularSolution(GA, GB, XB, omega, greyT) for greyT in temperatureSpace]
wSpace = np.linspace(0.01,100.0,10)
y2 = simplifiedSolution(XB, W)
greyWLines = [simplifiedSolution(XB, greyW) for greyW in wSpace]
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(14,8))
plt.tight_layout(pad=5.0)
#for greyLine in greyMagLines:
# axes[0].plot(eta, greyLine, 'grey', alpha=0.3)
#for greyLine in greyPhiLines:
# axes[1].plot(eta, greyLine, 'grey', alpha=0.3)
#axes[0].set_ylim(0,4)
#axes[0].plot(eta, y1, 'r', label=r"$MF(\eta,\xi)$")
#axes[1].set_ylim(0,180)
#axes[1].plot(eta, y2, 'b', label=r"$\phi(\eta,\xi)$")
for greyLine in greySolutionLines:
axes[0].plot(XB, greyLine, 'black', alpha=0.9)
axes[0].plot(XB, y1, 'r', label=r"Regular Solution", linewidth=4)
axes[0].annotate('GA='+str(GA)+'\n'+'GB='+str(GB),xy=(0,40), fontsize=15)
for greyLine in greyWLines:
axes[1].plot(XB, greyLine, 'black', alpha=0.9)
axes[1].plot(XB, y2, 'g', label=r"$W \phi^2 (1-\phi)^2$", linewidth=4)
axes[1].set_ylim(0.0,4.0)
#axes.plot(XB, y, 'r', label=r"$G_{soln}$")
#axes.legend()
#axes.grid(True, linestyle='dotted')
#axes.set_ylim(-600,200)
#axes.set_ylabel(r"$G_{soln}$")
#axes.set_xlabel(r"$X_B$")
for ax in axes:
ax.legend(loc="upper right", fontsize=15)
ax.set_ylabel(r"$G_{soln}$", fontsize=20)
ax.xaxis.set_tick_params(labelsize=15)
ax.yaxis.set_tick_params(labelsize=15)
axes[0].set_xlabel(r"$X_B$", fontsize=20)
axes[1].set_xlabel(r"$\phi$", fontsize=20)
plt.show()
return
interact(myfig3, omega=(0.0,1000.0,1.0), W=(0.0,100.0,1.0), temperature=(1.0,100.0,1.0));
def phiPlots():
    """Plot two example composition profiles that share the same spatial average."""
t = np.linspace(0,10,100)
y1 = np.cos(t)
y2 = np.cos(2*t)
y3 = 0*t
fig, axes = plt.subplots(figsize=(14,9))
axes.plot(t, y3, 'g--', label="Average")
axes.plot(t, y1, 'r', label="Profile 1")
axes.plot(t, y2, 'b', label="Profile 2")
axes.set_xlabel(r"$t$", fontsize=15)
axes.set_ylabel(r"$c(x)$", fontsize=15)
axes.legend(fontsize=15)
axes.xaxis.set_tick_params(labelsize=15)
axes.yaxis.set_tick_params(labelsize=15)
plt.show()
return
phiPlots()
import sympy as sp
sp.init_session(quiet=True)
W, epsilon = symbols('W epsilon', real=True, positive=True)
phi = Function('phi')  # phi(x) below is the unknown composition profile
functionalForm = W*phi(x)**2*(1-phi(x))**2 + epsilon*(phi(x).diff(x))**2
ele = sp.euler_equations(functionalForm, phi(x), x)
ele
delFdelPhi = (ele[0].lhs).simplify()
delFdelPhi
phi_sym = symbols('phi', real=True)  # plain symbol standing in for phi(x)
firstTermsFactored = sp.factor(4*W*phi_sym**3 - 6*W*phi_sym**2 + 2*W*phi_sym)
firstTermsFactored
import sympy as sp
sp.init_session(quiet=True)
sp.dsolve(sp.diff(f(x),x)*(1/(f(x)*(1-f(x))))-k,f(x),hint='lie_group')
def phiSolution(W, epsilon):
    """Plot the analytic tanh interface profile for the given W and epsilon."""
x = np.linspace(-10,10,100)
y = 0.5*(1.0 + np.tanh((np.sqrt(W/epsilon))*(x/2.0)))
fig, axes = plt.subplots(figsize=(14,9))
axes.plot(x, y, 'r', label=r"$\phi(x)$")
axes.set_xlabel(r"$x$", fontsize=20)
axes.set_ylabel(r"$\phi(x)$", fontsize=20)
axes.xaxis.set_tick_params(labelsize=15)
axes.yaxis.set_tick_params(labelsize=15)
axes.legend(fontsize=20)
plt.show()
return
interact(phiSolution, W=(0.01,10,0.1), epsilon=(0.01,10,0.1));
%%HTML
<video width="600" height="600" controls> <source src="./images/Cahn-Hilliard.mp4" type="video/mp4">
</video>
from fipy import *
from IPython.display import clear_output, display
import time
nx = ny = 100
mesh = Grid2D(nx=nx, ny=ny, dx=0.5, dy=0.5)
phi = CellVariable(name=r"$\phi$", mesh=mesh)
psi = CellVariable(name=r"$\psi$", mesh=mesh)
noise = GaussianNoiseVariable(mesh=mesh,mean=0.5,variance=0.01).value
phi[:] = noise
viewer = Viewer(vars=phi)
D = a = epsilon = 1.
dfdphi = a**2 * 2 * phi * (1 - phi) * (1 - 2 * phi)
dfdphi_ = a**2 * 2 * (1 - phi) * (1 - 2 * phi)
d2fdphi2 = a**2 * 2 * (1 - 6 * phi * (1 - phi))
eq1 = (TransientTerm(var=phi) == DiffusionTerm(coeff=D, var=psi))
eq2 = (ImplicitSourceTerm(coeff=1., var=psi)
== ImplicitSourceTerm(coeff=-d2fdphi2, var=phi) - d2fdphi2 * phi + dfdphi
- DiffusionTerm(coeff=epsilon**2, var=phi))
eq3 = (ImplicitSourceTerm(coeff=1., var=psi)
== ImplicitSourceTerm(coeff=dfdphi_, var=phi)
- DiffusionTerm(coeff=epsilon**2, var=phi))
eq = eq1 & eq3
dexp = -3
elapsed = 0.
duration = 100.0
# Run the model.
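# The time step grows exponentially (capped at 100): the stiff early dynamics
# are resolved finely while the slow late-stage coarsening runs quickly.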
while elapsed < duration:
dt = min(100, numerix.exp(dexp))
elapsed += dt
dexp += 0.01
eq.solve(dt=dt)
viewer.plot()
clear_output(wait=True)
display(viewer)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run CHILD in PyMT
Step2: You can now see the help information for Child. This time, have a look under the Parameters section (you may have to scroll down - it's the section after the citations). The Parameters section describes optional keywords that you can pass the the setup method. In the previous example we just used defaults. Below we'll see how to set input file parameters programmatically through keywords.
Step3: We can change input file parameters through setup keywords. The help description above gives a brief description of each of these. For this example we'll change the grid spacing, the size of the domain, and the duration of the simulation.
Step4: The setup folder now only contains the child input file.
Step5: Again, initialize and run the model for 10 time steps.
Step6: This time around it's not quite as clear what the units of time are. We can check in the same way as before.
Step7: Update until some time in the future. Notice that, in this case, we update to a partial time step. Child is fine with this however some other models may not be. For models that can not update to times that are not full time steps, PyMT will advance to the next time step and interpolate values to the requested time.
Step8: Child offers different output variables but we get them in the same way as before.
Step9: We can query each input and output variable. PyMT attaches a dictionary to each component called var that provides information about each variable. For instance we can see that "land_surface__elevation" has units of meters, is an input and output variable, and is defined on the nodes of grid with id 0.
Step10: If we plot this variable, we can visually see the unstructured triangular grid that Child has decomposed its grid into.
Step11: As with the var attribute, PyMT adds a dictionary, called grid, to components that provides a description of each of the model's grids. Here we can see the x and y positions of each grid node, and how nodes connect to one another to form faces (the triangles in this case). Grids are described using the ugrid conventions.
Step12: Child initializes its elevations with random noise centered around 0. We would like instead to give it elevations that have some land and some sea. First we'll get the x and y coordinates for each node along with their elevations.
Step13: All nodes above y=y_shore will be land, and all nodes below y=y_shore will be sea.
Step14: Just to verify we set things up correctly, we'll create a plot.
Step15: To get things going, we'll run the model for 5000 years and see what things look like.
Step16: We'll have some fun now by adding a simple uplift component. We'll run the component for another 5000 years but this time uplifting a corner of the grid by dz_dt.
Step17: A portion of the grid was uplifted and channels have begun eroding into it.
Step18: We now stop the uplift and run it for an additional 5000 years.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
# Some magic to make plots appear within the notebook
%matplotlib inline
import numpy as np # In case we need to use numpy
from pymt import plugins
model = plugins.Child()
help(model)
rm -rf _model # Clean up for the next step
config_file, initdir = model.setup('_model',
grid_node_spacing=750.,
grid_x_size=20000.,
grid_y_size=40000.,
run_duration=1e6)
ls _model
model.initialize(config_file, initdir)
for t in range(10):
model.update()
print(model.get_current_time())
model.time_units
model.update_until(201.5, units='year')
print(model.get_current_time())
model.output_var_names
model.get_value('land_surface__elevation')
model.var['land_surface__elevation']
model.quick_plot('land_surface__elevation', edgecolors='k', vmin=-200, vmax=200, cmap='BrBG_r')
model.grid[0]
x, y = model.get_grid_x(0), model.get_grid_y(0)
z = model.get_value('land_surface__elevation')
y_shore = 15000.
z[y < y_shore] -= 100
z[y >= y_shore] += 100
model.set_value('land_surface__elevation', z)
model.quick_plot('land_surface__elevation', edgecolors='k', vmin=-200, vmax=200, cmap='BrBG_r')
model.update_until(5000.)
model.quick_plot('land_surface__elevation', edgecolors='k', vmin=-200, vmax=200, cmap='BrBG_r')
dz_dt = .02
now = model.get_current_time()
times, dt = np.linspace(now, now + 5000., 50, retstep=True)
for time in times:
model.update_until(time)
z = model.get_value('land_surface__elevation')
z[(y > 15000.) & (x > 10000.)] += dz_dt * dt
model.set_value('land_surface__elevation', z)
model.quick_plot('land_surface__elevation', edgecolors='k', vmin=-200, vmax=200, cmap='BrBG_r')
model.update_until(model.get_current_time() + 5000.)
model.quick_plot('land_surface__elevation', edgecolors='k', vmin=-200, vmax=200, cmap='BrBG_r')
model.get_value('channel_water_sediment~bedload__mass_flow_rate')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we will need parameters for the simulation. These are known as initial conditions. For a 2 body gravitation problem, we'll need to know the masses of the two objects, the starting positions of the two objects, and the starting velocities of the two objects.
Step2: It will be nice to create a function for the force between Ma and Mb. Below is the physics for the force of Ma on Mb. How the physics works here is not important for the moment. Right now, I want to make sure you can transfer the math shown into a python function. I'll show on the board the physics behind this math for those interested.
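In component form, with $r_x = x_b - x_a$, $r_y = y_b - y_a$, and $r = \sqrt{r_x^2 + r_y^2}$, the gravitational force of Ma on Mb is

$$F_x = -\frac{G M_a M_b\, r_x}{r^3}, \qquad F_y = -\frac{G M_a M_b\, r_y}{r^3}.$$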
Step3: Now that we have our function, we need to prepare a loop. Before we do, we need to initialize the loop and choose a loop type, for or while. Below is the general outline for how each type of loop can go.
Step4: Now for the actual simulation. This is the hardest part to code. The general idea behind our loop is that as we step through time, we calculate the force, then calculate the new velocity, then the new position for each particle. At the end, we must update our arrays to reflect the new changes and update the time of the system. The time is super important! If we don't (say in a while loop), the simulation would never end and we would never get our result.
Step5: Now for the fun part (or not so fun part if your simulation had an issue), plot your results! This is something well covered in previous lectures. Show me a plot of (xa,ya) and (xb,yb). Does it look sort of familiar? Hopefully you get something like the below image (in units of AU).
|
<ASSISTANT_TASK:>
Python Code:
#Imports (numpy and matplotlib are used below)
import numpy as np
import matplotlib.pyplot as plt

#Physical Constants (SI units)
G=6.67e-11
AU=1.5e11 #meters. Distance between sun and earth.
daysec=24.0*60*60 #seconds in a day
#####run specfic constants. Change as needed#####
#Masses in kg
Ma=6.0e24 #always set as smaller mass
Mb=2.0e30 #always set as larger mass
#Time settings
t=0.0 #Starting time
dt=.01*daysec #Time set for simulation
tend=300*daysec #Time where simulation ends
#Intial conditions (posistion [m] and velocities [m/s] in x,y,z coorindates)
#For Ma
xa=1.0*AU
ya=0.0
vxa=0.0
vya=30000.0
#For Mb
xb=0.0
yb=0.0
vxb=0.0
vyb=0.0
#Function to compute the force between the two objects
def Fg(xa, ya, xb, yb):
#Computer rx and ry between Ma and Mb
rx=xb-xa
ry=#Write it in
#compute r^3
r3=#Write in r^3 using the equation above. Make use of np.sqrt()
#Compute the force in Newtons. Use the equations above as a Guide!
fx=#Write it in
fy=-#Write it in
return #What do we return?
#Run a loop for the simulation. Keep track of Ma and Mb posistions
#Intialize vectors
xaAr=np.array([])
yaAr=np.array([])
xbAr=#Write it in for Particle B
ybAr=#Write it in for Particle B
#Your loop here
#using while loop method with appending. Can also be done with for loops
while #What is our condition for ending?:
#Compute current force on Ma and Mb. Ma recieves the opposite force of Mb
fx,fy=Fg(xa,ya,xb,yb)
#Update the velocities and posistions of the particles
vxa=vxa-fx*dt/Ma
vya=#Write it in for y
vxb=#Write it in
vyb=#Write it in
xa=xa+vxa*dt
ya=#Wite it in
xb=#Write it in
yb=#Write it in
#Save data to lists
xaAr=np.append(xaAr,xa)
yaAr=#How will I save it for yaAr?
xbAr=np.append(xbAr,xb)
ybAr=np.append(ybAr,yb)
#update the time by one time step, dt
#How do I update the time?
from IPython.display import Image
Image("Earth-Sun-averageResult.jpg")
#Your plot here
plt.plot(#Particle A plot
plt.plot(#Particle B plot
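#For reference, one possible completion of the exercise above (a sketch, not
#the only valid answer; it assumes the Newtonian force components given earlier):
#
# def Fg(xa, ya, xb, yb):
#     rx = xb - xa
#     ry = yb - ya
#     r3 = np.sqrt(rx**2 + ry**2)**3
#     fx = -G*Ma*Mb*rx/r3
#     fy = -G*Ma*Mb*ry/r3
#     return fx, fy
#
# ...and inside the `while t < tend:` loop:
#     vya = vya - fy*dt/Ma
#     vxb = vxb + fx*dt/Mb
#     vyb = vyb + fy*dt/Mb
#     ya = ya + vya*dt
#     xb = xb + vxb*dt
#     yb = yb + vyb*dt
#     t = t + dt
#
# plt.plot(xaAr/AU, yaAr/AU, label='Ma')
# plt.plot(xbAr/AU, ybAr/AU, label='Mb')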
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup
Step3: Data simulation
Step4: Let's simulate two timeseries and plot some basic information about them.
Step5: Now we put the signals at two locations on the cortex. We construct a SourceEstimate object that describes the signal at the cortical level.
Step6: Before we simulate the sensor-level data, let's define a signal-to-noise ratio.
Step7: Now we run the signal through the forward model to obtain simulated sensor data.
Step8: We create an Epochs object containing two trials: one with both noise and signal and one with just noise.
Step9: Power mapping
Step10: We will now compute the cortical power map at 10 Hz using a DICS beamformer.
Step12: Plot the DICS power maps for both approaches, starting with the first approach.
Step13: Now the second approach.
|
<ASSISTANT_TASK:>
Python Code:
# Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from scipy.signal import welch, coherence, unit_impulse
from matplotlib import pyplot as plt
import mne
from mne.simulation import simulate_raw, add_noise
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
from mne.time_frequency import csd_morlet
from mne.beamformer import make_dics, apply_dics_csd
# We use the MEG and MRI setup from the MNE-sample dataset
data_path = sample.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
# Filenames for various files we'll be using
meg_path = op.join(data_path, 'MEG', 'sample')
raw_fname = op.join(meg_path, 'sample_audvis_raw.fif')
fwd_fname = op.join(meg_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')
cov_fname = op.join(meg_path, 'sample_audvis-cov.fif')
fwd = mne.read_forward_solution(fwd_fname)
# Seed for the random number generator
rand = np.random.RandomState(42)
sfreq = 50. # Sampling frequency of the generated signal
n_samp = int(round(10. * sfreq))
times = np.arange(n_samp) / sfreq # 10 seconds of signal
n_times = len(times)
def coh_signal_gen():
    """Generate an oscillating signal.

    Returns
    -------
    signal : ndarray
        The generated signal.
    """
t_rand = 0.001 # Variation in the instantaneous frequency of the signal
std = 0.1 # Std-dev of the random fluctuations added to the signal
base_freq = 10. # Base frequency of the oscillators in Hertz
n_times = len(times)
# Generate an oscillator with varying frequency and phase lag.
signal = np.sin(2.0 * np.pi *
(base_freq * np.arange(n_times) / sfreq +
np.cumsum(t_rand * rand.randn(n_times))))
# Add some random fluctuations to the signal.
signal += std * rand.randn(n_times)
# Scale the signal to be in the right order of magnitude (~100 nAm)
# for MEG data.
signal *= 100e-9
return signal
signal1 = coh_signal_gen()
signal2 = coh_signal_gen()
fig, axes = plt.subplots(2, 2, figsize=(8, 4))
# Plot the timeseries
ax = axes[0][0]
ax.plot(times, 1e9 * signal1, lw=0.5)
ax.set(xlabel='Time (s)', xlim=times[[0, -1]], ylabel='Amplitude (Am)',
title='Signal 1')
ax = axes[0][1]
ax.plot(times, 1e9 * signal2, lw=0.5)
ax.set(xlabel='Time (s)', xlim=times[[0, -1]], title='Signal 2')
# Power spectrum of the first timeseries
f, p = welch(signal1, fs=sfreq, nperseg=128, nfft=256)
ax = axes[1][0]
# Only plot the first 100 frequencies
ax.plot(f[:100], 20 * np.log10(p[:100]), lw=1.)
ax.set(xlabel='Frequency (Hz)', xlim=f[[0, 99]],
ylabel='Power (dB)', title='Power spectrum of signal 1')
# Compute the coherence between the two timeseries
f, coh = coherence(signal1, signal2, fs=sfreq, nperseg=100, noverlap=64)
ax = axes[1][1]
ax.plot(f[:50], coh[:50], lw=1.)
ax.set(xlabel='Frequency (Hz)', xlim=f[[0, 49]], ylabel='Coherence',
title='Coherence between the timeseries')
fig.tight_layout()
# The locations on the cortex where the signal will originate from. These
# locations are indicated as vertex numbers.
vertices = [[146374], [33830]]
# Construct SourceEstimates that describe the signals at the cortical level.
data = np.vstack((signal1, signal2))
stc_signal = mne.SourceEstimate(
data, vertices, tmin=0, tstep=1. / sfreq, subject='sample')
stc_noise = stc_signal * 0.
snr = 1. # Signal-to-noise ratio. Decrease to add more noise.
# Read the info from the sample dataset. This defines the location of the
# sensors and such.
info = mne.io.read_info(raw_fname)
info.update(sfreq=sfreq, bads=[])
# Only use gradiometers
picks = mne.pick_types(info, meg='grad', stim=True, exclude=())
mne.pick_info(info, picks, copy=False)
# Define a covariance matrix for the simulated noise. In this tutorial, we use
# a simple diagonal matrix.
cov = mne.cov.make_ad_hoc_cov(info)
cov['data'] *= (20. / snr) ** 2 # Scale the noise to achieve the desired SNR
# Simulate the raw data, with a lowpass filter on the noise
stcs = [(stc_signal, unit_impulse(n_samp, dtype=int) * 1),
(stc_noise, unit_impulse(n_samp, dtype=int) * 2)] # stacked in time
duration = (len(stc_signal.times) * 2) / sfreq
raw = simulate_raw(info, stcs, forward=fwd)
add_noise(raw, cov, iir_filter=[4, -4, 0.8], random_state=rand)
events = mne.find_events(raw, initial_event=True)
tmax = (len(stc_signal.times) - 1) / sfreq
epochs = mne.Epochs(raw, events, event_id=dict(signal=1, noise=2),
tmin=0, tmax=tmax, baseline=None, preload=True)
assert len(epochs) == 2 # ensure that we got the two expected events
# Plot some of the channels of the simulated data that are situated above one
# of our simulated sources.
picks = mne.pick_channels(epochs.ch_names, mne.read_selection('Left-frontal'))
epochs.plot(picks=picks)
# Compute the inverse operator
fwd = mne.read_forward_solution(fwd_fname)
inv = make_inverse_operator(epochs.info, fwd, cov)
# Apply the inverse model to the trial that also contains the signal.
s = apply_inverse(epochs['signal'].average(), inv)
# Take the root-mean square along the time dimension and plot the result.
s_rms = np.sqrt((s ** 2).mean())
title = 'MNE-dSPM inverse (RMS)'
brain = s_rms.plot('sample', subjects_dir=subjects_dir, hemi='both', figure=1,
size=600, time_label=title, title=title)
# Indicate the true locations of the source activity on the plot.
brain.add_foci(vertices[0][0], coords_as_verts=True, hemi='lh')
brain.add_foci(vertices[1][0], coords_as_verts=True, hemi='rh')
# Rotate the view and add a title.
brain.show_view(view={'azimuth': 0, 'elevation': 0, 'distance': 550,
'focalpoint': [0, 0, 0]})
# Estimate the cross-spectral density (CSD) matrix on the trial containing the
# signal.
csd_signal = csd_morlet(epochs['signal'], frequencies=[10])
# Compute the spatial filters for each vertex, using two approaches.
filters_approach1 = make_dics(
info, fwd, csd_signal, reg=0.05, pick_ori='max-power', depth=1.,
inversion='single', weight_norm=None)
print(filters_approach1)
filters_approach2 = make_dics(
info, fwd, csd_signal, reg=0.05, pick_ori='max-power', depth=None,
inversion='matrix', weight_norm='unit-noise-gain')
print(filters_approach2)
# You can save these to disk with:
# filters_approach1.save('filters_1-dics.h5')
# Compute the DICS power map by applying the spatial filters to the CSD matrix.
power_approach1, f = apply_dics_csd(csd_signal, filters_approach1)
power_approach2, f = apply_dics_csd(csd_signal, filters_approach2)
def plot_approach(power, n):
    """Plot the results on a brain."""
title = 'DICS power map, approach %d' % n
    brain = power.plot(
'sample', subjects_dir=subjects_dir, hemi='both',
size=600, time_label=title, title=title)
# Indicate the true locations of the source activity on the plot.
brain.add_foci(vertices[0][0], coords_as_verts=True, hemi='lh', color='b')
brain.add_foci(vertices[1][0], coords_as_verts=True, hemi='rh', color='b')
# Rotate the view and add a title.
brain.show_view(view={'azimuth': 0, 'elevation': 0, 'distance': 550,
'focalpoint': [0, 0, 0]})
return brain
brain1 = plot_approach(power_approach1, 1)
brain2 = plot_approach(power_approach2, 2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Input (CSV) filename
Step2: Determine output (SQLite DB) filename from the input filename
Step3: Connect to an SQL data source
Step4: Convert CSV to SQLite DB
|
<ASSISTANT_TASK:>
Python Code:
from sqlalchemy import create_engine # database connection
import datetime as dt # for timing
import pandas as pd # for data frames
#CSV_FILE = 'NYC-311-2M.csv'
CSV_FILE = None
assert CSV_FILE
import re
CSV_BASES = re.findall (r'(.*)\.csv$', CSV_FILE, re.I)
assert len (CSV_BASES) >= 1
CSV_BASE = CSV_BASES[0]
DB_FILE = "%s.db" % CSV_BASE
print ("Converting: %s to %s (an SQLite DB) ..." % (CSV_FILE, DB_FILE))
disk_engine = create_engine ('sqlite:///%s' % DB_FILE)
# Convert .csv to .db
CHUNKSIZE = 25000 # Number of rows to read at a time
# List of columns to keep
KEEP_COLS = ['Agency', 'CreatedDate', 'ClosedDate', 'ComplaintType',
             'Descriptor', 'TimeToCompletion', 'City']
start = dt.datetime.now () # start timer
j = 0
index_start = 1
for df in pd.read_csv(CSV_FILE, chunksize=CHUNKSIZE, iterator=True, encoding='utf-8'):
# Remove spaces from columns
df = df.rename(columns={c: c.replace(' ', '') for c in df.columns})
# Convert to proper date+timestamps
df['CreatedDate'] = pd.to_datetime(df['CreatedDate'])
df['ClosedDate'] = pd.to_datetime(df['ClosedDate'])
df.index += index_start
for c in df.columns:
if c not in KEEP_COLS:
df = df.drop (c, axis=1)
j += 1
    print ('{} seconds: completed {} rows'.format((dt.datetime.now() - start).seconds, j*CHUNKSIZE))
df.to_sql('data', disk_engine, if_exists='append')
index_start = df.index[-1] + 1
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Be sure to input 'y' into the cell below to allow conda to install hh-suite to this colab.
Step2: Using hhsearch
Step3: Let's do an example. Say we have a protein which we want to compare to an MSA in order to identify any homologous regions. For this we can use hhsearch.
Step5: Now let's take some protein sequence and search through the dbCAN database to see if we can find any potential homologous regions. First we will specify the sequence and save it as a FASTA file or a3m file in order to be readable by hhsearch. I pulled this sequence from the example query.a3m in the hhsuite data directory.
Step6: Then we can call hhsearch, specifying the query sequence with the -i flag, the database to search through with -d, and the output with -o.
Step8: The 'Prob' column describes the estimated probability of the query sequence being at least partially homologous to the template. Probabilities of 95% or more are nearly certain, and probabilities of 30% or more call for closer consideration. The E value tells you how many random matches with a better score would be expected if the searched database was unrelated to the query sequence. These results show that none of the sequences align well with our randomly chosen protein, which is to be expected because our query sequence was chosen at random.
Step9: As you can see, there are 2 sequences which are a match for our query sequence.
|
<ASSISTANT_TASK:>
Python Code:
!wget -c https://repo.anaconda.com/miniconda/Miniconda3-4.5.4-Linux-x86_64.sh
!chmod +x Miniconda3-4.5.4-Linux-x86_64.sh
!bash ./Miniconda3-4.5.4-Linux-x86_64.sh -b -f -p /usr/local
!conda install -c conda-forge -c bioconda hhsuite
!hhsearch
%%bash
cd /content/
mkdir hh
cd hh
mkdir databases; cd databases
wget http://wwwuser.gwdg.de/~compbiol/data/hhsuite/databases/hhsuite_dbs/dbCAN-fam-V9.tar.gz
tar xzvf dbCAN-fam-V9.tar.gz
with open('protein.fasta', 'w') as f:
    f.write("""\
>Uncharacterized bovine protein (Fragment)
--PAGGQCtgiWHLLTRPLRP--QGRLPGLRVKYVFLVWLGVFAGSWMAYTHYSSYAELCRGHICQVVICDQFRKGIISGSICQDLCHLHQVEWRTCLSSVPGQQVYSGLWQGKEVTIKCGIEESLNSKAGSDGAPRRELVLFDKPSRGTSIKEFREMTLSFLKANLGDLPSLPALVGRVLLMADFNKDNRVSLAEAKSVWALLQRNEFLLLLSLQEKEHASRLLGYCGDLYVTEGVPLSSWPGATLPPLLRPLLPPALHGALQQWLGPAWPWRAKIAMGLLEFVEDLFHGAYGNFYMCETTLANVGYTAKYDFRMADLQQVAPEAAVRRFLRGRRCEHSADCTYGRDCRAPCDTLMRQCKGDLVQPNLAKVCELLRDYLLPGAPAALRPELGKQLRTCTTLSGLASQVEAHHSLVLSHLKSLLWKEISDSRYT
""")
!hhsearch -i /content/protein.fasta -d /content/hh/databases/dbCAN-fam-V9 -o /content/protein.hhr
with open('protein2.fasta', 'w') as f:
    f.write("""\
>dockerin,22,NCBI-Bacteria,gi|125972715|ref|YP_001036625.1|,162-245,0.033
SCADLNGDGKITSSDYNLLKRYILHLIDKFPIGNDETDEGINDGFNDETDEDINDSFIEANSKFAFDIFKQISKDEQGKNVFIS
""")
!hhsearch -i /content/protein2.fasta -d /content/hh/databases/dbCAN-fam-V9 -o /content/protein2.hhr
!wget -O protein3.fasta https://www.uniprot.org/uniprot/G8M3C3.fasta
!hhblits -i /content/protein3.fasta -d /content/hh/databases/dbCAN-fam-V9 -oa3m query.a3m -n 2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a contour plot of temperature
Step2: Create an image plot of Geopotential height
Step3: Plot the data on a map
|
<ASSISTANT_TASK:>
Python Code:
import xarray as xr
from metpy.cbook import get_test_data
from metpy.plots import ContourPlot, ImagePlot, MapPanel, PanelContainer
from metpy.units import units
# Use sample NARR data for plotting
narr = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
contour = ContourPlot()
contour.data = narr
contour.field = 'Temperature'
contour.level = 850 * units.hPa
contour.linecolor = 'red'
contour.contours = 15
img = ImagePlot()
img.data = narr
img.field = 'Geopotential_height'
img.level = 850 * units.hPa
panel = MapPanel()
panel.area = 'us'
panel.layers = ['coastline', 'borders', 'states', 'rivers', 'ocean', 'land']
panel.title = 'NARR Example'
panel.plots = [contour, img]
pc = PanelContainer()
pc.size = (10, 8)
pc.panels = [panel]
pc.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's put the data in order
Step2: Let's look at the percentage of missing data by feature
Step3: The number of unique values for these columns
Step4: So, we need to rid the data of missing values. Let's cross-reference the information and draw up a plan of action.
Step5: Nfloors
Step6: Metrdist, Walk
Step7: Given that the average walking speed is 4 km/h and a car's is 50 km/h, let's fill a column with the estimated number of kilometers to the metro (remember that metrdist holds values in minutes)
Step8: Посмотрим процент странных значений и выбросим эти данные, если он будет мал
Step9: Посчитаем среднее значение по округу, заполним пропуски
Step10: Для понимания того, как работать с Kitsp и Livsp, изучим обязательное поле Totsp. Посмотрим на минимум и максимум, посмотрим на распределение.
Step11: Посмотрю на то, что кроется за краевыми значениями
Step12: Бывают квартиры общей площадью 11 м.кв., например. Во-первых, по фотографиям видно, что это не так, во-вторых, есть некоторые закрепленные нормы на минимальную площадь жилого помещения, и это около 20 кв.м. В-третьих, странных квартир мало, так что я их просто удалю.
Step13: Что теперь можно делать с этой информацией? Нам надо заполнить пропуски в Kitsp и Livsp. Можно предположить линейную зависимость этих параметров от Totsp и попробовать использовать линейную модель для заполнения nanов. Да, линейная модель предскажет значения с ошибками, но если заполнять пропуски без нее, то ошибки тоже будут. А у нее они будут наверняка поменьше, чем у какого-нибудь обычного заполнения медианой.
Step14: Посмотрим на получившиеся распределения
Step15: Снова удалим маленький процент странных данных
Step16: Теперь изучим данные на общую адекватность. Начнем с распределения цен.
Step17: Так данные выглядеть не должны. Сначала уберу outliers, при построении модели попробую поработать с логарифмом от цены.
Step18: Изучим данные по этажам
Step19: Номер этажа нигде не больше этажности дома, уже неплохо)
Step20: Не понятно наличие хвоста. Посмотрим на него
Step21: Это реальные данные, но их мало и они сильно портят распределение, уберем их
Step22: Так лучше. Посмотрим на распределение Nfloors
Step23: Аналогично
Step24: Теперь посмотрим на распределение числа комнат
Step25: Теперь на распределение расстояний до центра города
Step26: Снова изучим хвост ( > 28 )
Step27: Вот здесь уже появляется странная информация. Например, реальный адрес по Одинцовскому району Московской области, но при этом указано м. Парк Победы. Да, от него наверное удобно добираться, если ехать из Москвы. Но это не отменяет того, что на самом деле поездка такая будет ИЗ Москвы и уже в Подмосковье.
Step28: Прикинув по карте, получила 27. Не проходящих порог данных меньше 1%, удаляю их.
Step29: Снова посмотрим на распределение
Step30: Стало лучше.
Step31: Видна положительная корреляция цены с площадями и числом комнат, а также отрицательная с расстоянием до центра. Выглядит понятно
Step32: Чем больше комнат, тем больше цена, многокомнатных меньше, чем других вариантов
Step33: Чем больше расстояние до центра, тем меньше цена
Step34: Чем больше площадь, тем больше цена
Step35: С большим отрывом лидирует ЦАО
Step36: И сохраним данные. Остальное в следующем ноутбуке
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
plt.style.use('bmh')
%matplotlib inline
import random
random.seed(42)
np.random.seed(42)
districts = {1: 'NW', 4: 'C', 5:'N', 6:'NE', 7:'E', 8:'SE', 9:'S', 10:'SW', 11:'W'}
data = pd.read_csv('cian_full_data.csv')
data.head()
data.drop(0, axis = 0, inplace = True)
data.head()
data.index = [x for x in range(len(data.index))]
data.rename(columns={'0' : 'Bal', '1': 'Brick', '2': 'Distance', '3': 'Floor', '4': 'Kitsp', '5': 'Livsp', '6': 'Metrdist',
'7': 'New', '8': 'Nfloors', '9': 'Price', '10': 'Rooms', '11': 'Tel', '12': 'Totsp',
'13': 'Walk', '14': 'link'}, inplace = True)
data.head()
data.shape
def nans_appearance(data):
columns = []
nans = data.isnull()
for i in data.columns:
x = nans[nans[i] == True].shape[0]
if (x > 0):
columns.append(i)
print('nans in', i, ':', x, '; percentage : '"%.3f" %(x/data.shape[0]))
return columns
col_nans = nans_appearance(data)
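# For reference, pandas gives the same summary without a helper (a sketch):
na_share = data.isnull().mean()
na_share[na_share > 0].round(3)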
def my_nunique(data, columns):
for i in columns:
print(str(i) + ' : ' + str(data[i].nunique()))
my_nunique(data, col_nans)
data.drop('Tel', axis = 1, inplace = True)
data[data['Nfloors'].isnull()].shape
data = data[data['Nfloors'] > 0]
data.index = [x for x in range(len(data.index))]
data.shape
data['Metrokm'] = np.nan
for i in range(data.shape[0]):
    if data['Walk'][i] == 1.:
        data.loc[i, 'Metrokm'] = 4/60 * data['Metrdist'][i]   # walking: ~4 km/h
    elif data['Walk'][i] == 0.:
        data.loc[i, 'Metrokm'] = 5/6 * data['Metrdist'][i]    # driving: ~50 km/h
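# Vectorized equivalent of the loop above (a sketch): Walk == 1 means on foot
# (4/60 km per minute), Walk == 0 means by car (5/6 km per minute).
data['Metrokm'] = np.where(data['Walk'] == 1., 4/60 * data['Metrdist'],
                  np.where(data['Walk'] == 0., 5/6 * data['Metrdist'], np.nan))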
data[data['Metrokm'] > 17].shape[0]/data.shape[0]
data = data[(data['Metrokm'] <= 17) | (data['Metrokm'].isnull())]
data.index = [x for x in range(len(data.index))]
metrokms_distr=[]
for i in districts.values():
tmp = data[data['Distr'] == i]
med = tmp['Metrokm'].median()
metrokms_distr.append(med)
tmp = []
ind = 0
for i in districts.keys():
tmp.append((districts.get(i), metrokms_distr[ind]))
ind += 1
tmp = dict(tmp)
tmp
for i in districts.values():
    for j in range(data.shape[0]):
        if data['Distr'][j] == i:
            if not (data['Metrokm'][j] >= 0):   # catches NaN
                data.loc[j, 'Metrokm'] = tmp.get(i)
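# The median lookup and the fill loop above condense into two pandas
# one-liners (a sketch; idempotent at this point):
med = data.groupby('Distr')['Metrokm'].median()
data['Metrokm'] = data['Metrokm'].fillna(data['Distr'].map(med))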
data.drop(['Metrdist', 'Walk'], axis = 1, inplace = True)
nans_appearance(data)
totsps = data['Totsp'].unique()
totsps.sort()
print('min =', totsps.min(), '; max =', totsps.max())
count_totsps = []
for i in totsps:
count_totsps.append(data[data['Totsp'] == i].shape[0])
plt.hist(data['Totsp'], color='r')
plt.show()
data[data['Totsp'] < 18]['link']
data[data['Totsp'] < 20].shape[0]/data.shape[0]
data = data[data['Totsp'] >= 20]
data.index = [x for x in range(len(data.index))]
data[data['Totsp'] > 320].shape[0]/data.shape[0]
data = data[data['Totsp'] < 320]
data.index = [x for x in range(len(data.index))]
plt.hist(data['Totsp'], color='r')
plt.show()
data.shape
train = data[data['Kitsp'] > 0]
train.shape
test = data[data['Kitsp'].isnull()]
test.shape
X = train['Totsp'].values.reshape(-1, 1)
Y = train['Kitsp'].values.reshape(-1, 1)
X_test = test['Totsp'].values.reshape(-1, 1)
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
Xtr, Xval, Ytr, Yval = train_test_split(X, Y, test_size = 0.2)
model_kitchen = LinearRegression(normalize = True)
model_kitchen.fit(Xtr, Ytr)
pred = model_kitchen.predict(Xval)
from sklearn.metrics import mean_squared_error
from math import sqrt
print('rmse:', sqrt(mean_squared_error(Yval, pred)))
Y_test = model_kitchen.predict(X_test)
ind = 0
for i in range(data.shape[0]):
    if not (data['Kitsp'][i] >= 0):   # catches NaN
        data.loc[i, 'Kitsp'] = Y_test[ind]
        ind += 1
nans_appearance(data)
train = data[data['Livsp'] > 0]
train.shape
test = data[data['Livsp'].isnull()]
test.shape
X = train['Totsp'].values.reshape(-1, 1)
Y = train['Livsp'].values.reshape(-1, 1)
X_test = test['Totsp'].values.reshape(-1, 1)
Xtr, Xval, Ytr, Yval = train_test_split(X, Y, test_size = 0.1)
model_liv = LinearRegression()
model_liv.fit(Xtr, Ytr)
pred = model_liv.predict(Xval)
print('rmse:', sqrt(mean_squared_error(Yval, pred)))
Y_test = model_liv.predict(X_test)
ind = 0
for i in range(data.shape[0]):
    if not (data['Livsp'][i] >= 0):   # catches NaN
        data.loc[i, 'Livsp'] = Y_test[ind]
        ind += 1
plt.hist(data['Kitsp'], color = 'r')
plt.show()
plt.hist(data['Livsp'], color = 'r')
plt.show()
data[data['Kitsp'] < 4].shape
data = data[data['Kitsp'] >= 4]
data.index = [x for x in range(len(data.index))]
data[data['Livsp'] < 10].shape
data = data[data['Livsp'] >= 10]
data.index = [x for x in range(len(data.index))]
nans_appearance(data)
print('min: ', data['Price'].min(), '; max:', data['Price'].max())
plt.hist(data['Price'], color = 'r')
plt.show()
data[data['Price'] < 1500000].shape[0]
data.drop(data[data['Price'] < 1500000].index, inplace = True)
data.index = [x for x in range(len(data.index))]
data[data['Price'] > 200000000].shape[0]/data.shape[0]
data.drop(data[data['Price'] > 200000000].index, inplace = True)
data.index = [x for x in range(len(data.index))]
plt.hist(data['Price'], color = 'r')
plt.show()
data[data['Nfloors'] < data['Floor']].shape
plt.hist(data['Floor'], color = 'r')
plt.show()
tmp = data[data['Floor'] > 40]
tmp[['Floor', 'link']]
data.drop(data[data['Floor'] > 40].index, inplace = True)
data.index = [x for x in range(len(data.index))]
plt.hist(data['Floor'], color = 'r')
plt.show()
plt.hist(data['Nfloors'], color = 'r')
plt.show()
data[data['Nfloors'] > 50].shape[0]/data.shape[0]
data.drop(data[data['Nfloors'] > 50].index, inplace = True)
data.index = [x for x in range(len(data.index))]
plt.hist(data['Nfloors'], color = 'r')
plt.show()
rooms = data['Rooms'].unique()
rooms.sort()
count_rooms = []
for i in rooms:
tmp = data[data['Rooms'] == i]
count_rooms.append(tmp.shape[0])
width = 1
plot = plt.bar(rooms, count_rooms, width, color='r')
plt.show()
plt.hist(data['Distance'], color='r')
plt.show()
data[data['Distance'] > 28].shape[0]
tmp = data[data['Distance'] > 28]
tmp[['Distance', 'link']]
data[data['Distance'] > 27].shape[0]/data.shape[0]
data = data[data['Distance'] <= 27]
data.index = [x for x in range(len(data.index))]
plt.hist(data['Distance'], color='r')
plt.show()
data.columns
data_corr = data[['Distance', 'Floor', 'Kitsp', 'Livsp', 'New', 'Nfloors', 'Price',
'Rooms', 'Totsp', 'Metrokm']]
correlation = data_corr.corr()
f, ax = plt.subplots(figsize=(10, 10))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(correlation, cmap=cmap, vmax=.3,
square=True, xticklabels=correlation.columns.values,
yticklabels=correlation.columns.values,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
plot = plt.scatter(data['Rooms'], data['Price'], color = 'r')
plt.show()
plot = plt.scatter(data['Distance'], data['Price'], color = 'r')
plt.show()
plot = plt.scatter(data['Totsp'], data['Price'], color = 'r')
plt.show()
prices_distr = []
for i in districts.keys():
tmp = data[data['Distr'] == districts.get(i)]
price = tmp['Price'].median()
prices_distr.append(price)
plot = plt.bar(range(9), prices_distr, color='r')
plt.show()
districts
to_encode = ['Bal', 'Brick', 'Distr']
data[to_encode] = data[to_encode].fillna(value = 'na')
onehots = pd.get_dummies(data[to_encode])
onehots.head()
data = pd.concat([data, onehots], axis=1)
data.columns
data.drop(to_encode, axis = 1, inplace = True)
data.drop('link', axis = 1, inplace = True)
data.shape
data.to_csv('cian_data_clear_for_modeling.csv', index = False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read data. There are two datasets
Step2: Unify index: cast everything to string and strip trailing spaces.
Step3: Drop duplicates (shouldn't be any)
Step4: Split Planon import into loggers and meters
Step5: Index unique? show number of duplicates in index
Step6: Meters are not unique. This is because of the spaces served. This is ok for now, we will deal with duplicates at the comparison stage. Same is true for loggers - in the unlikely event that there are duplicates in the future.
Step7: 2. Validation
Step8: 2.1. Meters
Step9: Planon sensors are not unique because of the spaces served convention in the two data architectures. The Planon architecture devotes a new line for each space served - hence the not unique index. The Master architecture lists all the spaces only once, as a list, therefore it has a unique index. We will need to take this into account and create matching dataframe out of planon for comparison, with a unique index.
Step10: Sort datasets after index for easier comparison.
Step11: 2.1.1 Slicing of meters to only certain columns of comparison
Step12: Create dictionary that maps Planon column names onto Master.
Step13: Filter both dataframes based on these new columns. Then remove duplicates. Currently, this leads to loss of information of spaces served, but also a unique index for the Planon dataframe, therefore bringing the dataframes closer to each other. When including spaces explicitly in the comparison (if we want to - or just trust the Planon space mapping), this needs to be modified.
Step14: Unify headers, drop duplicates (bear in mind the spaces argument; this is where it needs to be brought back in in the future!).
Step15: Fiscal/Tenant meter name needs fixing from Yes/No and 1/0.
Step16: Cross-check missing meters
Step17: Check for duplicates in index, but not duplicates over the entire row
Step18: The duplicates are the nans. Remove these for now. Could revisit later to do an index-less comparison, only over row contents.
Step19: Do comparison only on common indices. Need to revisit and identify the cause of missing meters, both ways (5 Planon->Meters and 30 Meters->Planon in this example).
Step20: Still have duplicate indices. For now we just drop and keep the first.
Step21: 2.1.2. Primitive comparison
Step22: 2.1.3. Horizontal comparison
Step23: Percentage matching
Step24: 2.1.4. Vertical comparison
Step25: 2.1.5. Smart(er) comparison
Step26: Some errors fixed, some left. Let's see which ones. These are either
Step27: Let us repeat the exercise for Logger Channel. Cross-validate, flag as highly likely error where both mismatch.
Step28: All errors fixed on logger channels.
Step29: New error percentage
Step30: 2.2. Loggers
Step31: Create dictionary that maps Planon column names onto Master.
Step32: Loggers matching
Step33: Percentage matching
Step34: Loggers not matching on Building Name.
Step35: That didn't help.
Step36: Follow up with lexical distance comparison. That would flag this as a match.
Step37: Technically the same, but there is a number format error. Compare based on the float value; if they match, replace one of them. This needs guarding, as float() will throw a 'cannot convert to float' exception if strings are left in from the previous step.
Step38: New error percentage
Step39: (Bearing in mind the above, this is technically 0)
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
planon=pd.read_excel('EIS Assets v2.xlsx',index_col = 'Code')
#master_loggerscontrollers_old = pd.read_csv('LoggersControllers.csv', index_col = 'Asset Code')
#master_meterssensors_old = pd.read_csv('MetersSensors.csv', encoding = 'macroman', index_col = 'Asset Code')
master='MASTER PlanonLoggersAndMeters 17 10 16.xlsx'
master_loggerscontrollers=pd.read_excel(master,sheetname='Loggers Controllers', index_col = 'Asset Code')
master_meterssensors=pd.read_excel(master,sheetname='Meters Sensors', encoding = 'macroman', index_col = 'Asset Code')
planon['Code']=planon.index
master_loggerscontrollers['Code']=master_loggerscontrollers.index
master_meterssensors['Code']=master_meterssensors.index
set(master_meterssensors['Classification Group'])
set(master_loggerscontrollers['Classification Group'])
new_index=[]
for i in master_meterssensors.index:
if '/' not in i:
new_index.append(i[:i.find('-')+1]+i[i.find('-')+1:].replace('-','/'))
else:
new_index.append(i)
master_meterssensors.index=new_index
master_meterssensors['Code']=master_meterssensors.index
new_index=[]
for i in master_meterssensors.index:
logger=i[:i.find('/')]
if master_loggerscontrollers.loc[logger]['Classification Group']=='BMS controller':
meter=i[i.find('/')+1:]
if meter[0] not in {'N','n','o','i'}:
new_index.append(i)
else:
new_index.append(i)
len(master_meterssensors)
master_meterssensors=master_meterssensors.loc[new_index]
len(master_meterssensors)
master_meterssensors.to_csv('meterssensors.csv')
master_loggerscontrollers.to_csv('loggerscontrollers.csv')
planon.index=[str(i).strip() for i in planon.index]
master_loggerscontrollers.index=[str(i).strip() for i in master_loggerscontrollers.index]
master_meterssensors.index=[str(i).strip() for i in master_meterssensors.index]
planon.drop_duplicates(inplace=True)
master_loggerscontrollers.drop_duplicates(inplace=True)
master_meterssensors.drop_duplicates(inplace=True)
# Split the Planon file into 2, one for loggers & controllers, and one for meters & sensors.
planon_loggerscontrollers = planon.loc[(planon['Classification Group'] == 'EN.EN4 BMS Controller') | (planon['Classification Group'] == 'EN.EN1 Data Logger')]
planon_meterssensors = planon.loc[(planon['Classification Group'] == 'EN.EN2 Energy Meter') | (planon['Classification Group'] == 'EN.EN3 Energy Sensor')]
planon_loggerscontrollers.drop_duplicates(inplace=True)
planon_meterssensors.drop_duplicates(inplace=True)
len(planon_loggerscontrollers.index[planon_loggerscontrollers.index.duplicated()])
len(planon_meterssensors.index[planon_meterssensors.index.duplicated()])
planon_meterssensors.head(3)
buildings=set(planon_meterssensors['BuildingNo.'])
buildings
len(buildings)
master_meterssensors_for_validation = \
pd.concat([master_meterssensors.loc[master_meterssensors['Building Code'] == building] \
for building in buildings])
master_meterssensors_for_validation.head(2)
#alternative method
master_meterssensors_for_validation2 = \
master_meterssensors[master_meterssensors['Building Code'].isin(buildings)]
master_meterssensors_for_validation2.head(2)
len(master_meterssensors_for_validation)
len(planon_meterssensors)-len(planon_meterssensors.index[planon_meterssensors.index.duplicated()])
master_meterssensors_for_validation.sort_index(inplace=True)
planon_meterssensors.sort_index(inplace=True)
planon_meterssensors.T
master_meterssensors_for_validation.T
#Planon:Master
meters_match_dict={
"BuildingNo.":"Building Code",
"Building":"Building Name",
"Description":"Description",
"EIS ID":"Logger Channel",
"Tenant Meter.Name":"Tenant meter",
"Fiscal Meter.Name":"Fiscal meter",
"Code":"Code"
}
master_meterssensors_for_validation_filtered=master_meterssensors_for_validation[list(meters_match_dict.values())]
planon_meterssensors_filtered=planon_meterssensors[list(meters_match_dict.keys())]
master_meterssensors_for_validation_filtered.head(2)
planon_meterssensors_filtered.head(2)
planon_meterssensors_filtered.columns=[meters_match_dict[i] for i in planon_meterssensors_filtered]
planon_meterssensors_filtered.drop_duplicates(inplace=True)
master_meterssensors_for_validation_filtered.drop_duplicates(inplace=True)
planon_meterssensors_filtered.head(2)
planon_meterssensors_filtered['Fiscal meter']=planon_meterssensors_filtered['Fiscal meter'].isin(['Yes'])
planon_meterssensors_filtered['Tenant meter']=planon_meterssensors_filtered['Tenant meter'].isin(['Yes'])
master_meterssensors_for_validation_filtered['Fiscal meter']=master_meterssensors_for_validation_filtered['Fiscal meter'].isin([1])
master_meterssensors_for_validation_filtered['Tenant meter']=master_meterssensors_for_validation_filtered['Tenant meter'].isin([1])
master_meterssensors_for_validation_filtered.head(2)
planon_meterssensors_filtered.head(2)
a=np.sort(list(set(planon_meterssensors_filtered.index)))
b=np.sort(list(set(master_meterssensors_for_validation_filtered.index)))
meterssensors_not_in_planon=[]
for i in b:
if i not in a:
print(i+',',end=" "),
meterssensors_not_in_planon.append(i)
print('\n\nMeters in Master, but not in Planon:',
len(meterssensors_not_in_planon),'/',len(b),':',
round(len(meterssensors_not_in_planon)/len(b)*100,3),'%')
(set([i[:5] for i in meterssensors_not_in_planon]))
a=np.sort(list(set(planon_meterssensors_filtered.index)))
b=np.sort(list(set(master_meterssensors_for_validation_filtered.index)))
meterssensors_not_in_master=[]
for i in a:
if i not in b:
print(i+',',end=" "),
meterssensors_not_in_master.append(i)
print('\n\nMeters in Planon, not in Master:',
len(meterssensors_not_in_master),'/',len(a),':',
round(len(meterssensors_not_in_master)/len(a)*100,3),'%')
len(set([i for i in meterssensors_not_in_master]))
set([i[:9] for i in meterssensors_not_in_master])
set([i[:5] for i in meterssensors_not_in_master])
print(len(planon_meterssensors_filtered.index))
print(len(set(planon_meterssensors_filtered.index)))
print(len(master_meterssensors_for_validation_filtered.index))
print(len(set(master_meterssensors_for_validation_filtered.index)))
master_meterssensors_for_validation_filtered[master_meterssensors_for_validation_filtered.index.duplicated()]
good_index=[i for i in master_meterssensors_for_validation_filtered.index if str(i).lower().strip()!='nan']
master_meterssensors_for_validation_filtered=master_meterssensors_for_validation_filtered.loc[good_index]
master_meterssensors_for_validation_filtered.drop_duplicates(inplace=True)
len(planon_meterssensors_filtered)
len(master_meterssensors_for_validation_filtered)
comon_index=list(set(master_meterssensors_for_validation_filtered.index).intersection(set(planon_meterssensors_filtered.index)))
len(comon_index)
master_meterssensors_for_validation_intersected=master_meterssensors_for_validation_filtered.loc[comon_index].sort_index()
planon_meterssensors_intersected=planon_meterssensors_filtered.loc[comon_index].sort_index()
len(master_meterssensors_for_validation_intersected)
len(planon_meterssensors_intersected)
master_meterssensors_for_validation_intersected = master_meterssensors_for_validation_intersected[~master_meterssensors_for_validation_intersected.index.duplicated(keep='first')]
master_meterssensors_for_validation_intersected.head(2)
planon_meterssensors_intersected.head(2)
planon_meterssensors_intersected==master_meterssensors_for_validation_intersected
np.all(planon_meterssensors_intersected==master_meterssensors_for_validation_intersected)
(planon_meterssensors_intersected==master_meterssensors_for_validation_intersected).sum()
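# On pandas >= 1.1 the differing cells can also be listed directly (a sketch,
# requires the two frames to share identical index and column labels):
# planon_meterssensors_intersected.compare(master_meterssensors_for_validation_intersected)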
(planon_meterssensors_intersected==master_meterssensors_for_validation_intersected).sum()/\
len(planon_meterssensors_intersected)*100
((planon_meterssensors_intersected==master_meterssensors_for_validation_intersected).sum()/\
len(planon_meterssensors_intersected)*100).plot(kind='bar')
df=pd.DataFrame((planon_meterssensors_intersected.T==master_meterssensors_for_validation_intersected.T).sum())
df
df=pd.DataFrame((planon_meterssensors_intersected.T==master_meterssensors_for_validation_intersected.T).sum()/\
len(planon_meterssensors_intersected.T)*100)
df[df[0]<100]
sum(planon_meterssensors_intersected['Description']!=master_meterssensors_for_validation_intersected['Description'])
planon_meterssensors_intersected['Description']=[str(s).lower().strip()\
.replace(' ',' ').replace(' ',' ').replace('nan','')\
for s in planon_meterssensors_intersected['Description'].values]
master_meterssensors_for_validation_intersected['Description']=[str(s).lower().strip()\
.replace(' ',' ').replace(' ',' ').replace('nan','')\
for s in master_meterssensors_for_validation_intersected['Description'].values]
sum(planon_meterssensors_intersected['Description']!=master_meterssensors_for_validation_intersected['Description'])
for i in planon_meterssensors_intersected[planon_meterssensors_intersected['Description']!=master_meterssensors_for_validation_intersected['Description']].index:
print(i,'\t\tPlanon:',planon_meterssensors_intersected.loc[i]['Description'],'\t\tMaster:',master_meterssensors_for_validation_intersected.loc[i]['Description'])
sum(planon_meterssensors_intersected['Logger Channel']!=master_meterssensors_for_validation_intersected['Logger Channel'])
planon_meterssensors_intersected['Logger Channel']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ') for s in planon_meterssensors_intersected['Logger Channel'].values]
master_meterssensors_for_validation_intersected['Logger Channel']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ') for s in master_meterssensors_for_validation_intersected['Logger Channel'].values]
sum(planon_meterssensors_intersected['Logger Channel']!=master_meterssensors_for_validation_intersected['Logger Channel'])
for i in planon_meterssensors_intersected[planon_meterssensors_intersected['Logger Channel']!=master_meterssensors_for_validation_intersected['Logger Channel']].index:
print(i,'\t\tPlanon:',planon_meterssensors_intersected.loc[i]['Logger Channel'],'\t\tMaster:',master_meterssensors_for_validation_intersected.loc[i]['Logger Channel'])
(planon_meterssensors_intersected!=master_meterssensors_for_validation_intersected).sum()/\
len(planon_meterssensors_intersected)*100
buildings=set(planon_loggerscontrollers['BuildingNo.'])
buildings
master_loggerscontrollers_for_validation = \
pd.concat([master_loggerscontrollers.loc[master_loggerscontrollers['Building Code'] == building] \
for building in buildings])
master_loggerscontrollers_for_validation.head(2)
len(master_loggerscontrollers_for_validation)
len(planon_loggerscontrollers)-len(planon_loggerscontrollers.index[planon_loggerscontrollers.index.duplicated()])
master_loggerscontrollers_for_validation.sort_index(inplace=True)
planon_loggerscontrollers.sort_index(inplace=True)
planon_loggerscontrollers.T
master_loggerscontrollers_for_validation.T
#Planon:Master
loggers_match_dict={
"BuildingNo.":"Building Code",
"Building":"Building Name",
"Description":"Description",
"EIS ID":"Logger Serial Number",
"Make":"Make",
"Model":"Model",
"Code":"Code"
}
master_loggerscontrollers_for_validation_filtered=master_loggerscontrollers_for_validation[list(loggers_match_dict.values())]
planon_loggerscontrollers_filtered=planon_loggerscontrollers[list(loggers_match_dict.keys())]
master_loggerscontrollers_for_validation_filtered.head(2)
planon_loggerscontrollers_filtered.head(2)
planon_loggerscontrollers_filtered.columns=[loggers_match_dict[i] for i in planon_loggerscontrollers_filtered]
planon_loggerscontrollers_filtered.drop_duplicates(inplace=True)
master_loggerscontrollers_for_validation_filtered.drop_duplicates(inplace=True)
planon_loggerscontrollers_filtered.head(2)
master_loggerscontrollers_for_validation_filtered.head(2)
a=np.sort(list(set(planon_loggerscontrollers_filtered.index)))
b=np.sort(list(set(master_loggerscontrollers_for_validation_filtered.index)))
loggerscontrollers_not_in_planon=[]
for i in b:
if i not in a:
print(i+',',end=" "),
loggerscontrollers_not_in_planon.append(i)
print('\n\nLoggers in Master, but not in Planon:',
len(loggerscontrollers_not_in_planon),'/',len(b),':',
round(len(loggerscontrollers_not_in_planon)/len(b)*100,3),'%')
a=np.sort(list(set(planon_loggerscontrollers_filtered.index)))
b=np.sort(list(set(master_loggerscontrollers_for_validation_filtered.index)))
loggerscontrollers_not_in_master=[]
for i in a:
if i not in b:
print(i+',',end=" "),
loggerscontrollers_not_in_master.append(i)
print('\n\nLoggers in Planon, not in Master:',
len(loggerscontrollers_not_in_master),'/',len(a),':',
round(len(loggerscontrollers_not_in_master)/len(a)*100,3),'%')
print(len(planon_loggerscontrollers_filtered.index))
print(len(set(planon_loggerscontrollers_filtered.index)))
print(len(master_loggerscontrollers_for_validation_filtered.index))
print(len(set(master_loggerscontrollers_for_validation_filtered.index)))
master_loggerscontrollers_for_validation_filtered[master_loggerscontrollers_for_validation_filtered.index.duplicated()]
comon_index=list(set(master_loggerscontrollers_for_validation_filtered.index).intersection(set(planon_loggerscontrollers_filtered.index)))
master_loggerscontrollers_for_validation_intersected=master_loggerscontrollers_for_validation_filtered.loc[comon_index].sort_index()
planon_loggerscontrollers_intersected=planon_loggerscontrollers_filtered.loc[comon_index].sort_index()
master_loggerscontrollers_for_validation_intersected.head(2)
planon_loggerscontrollers_intersected.head(2)
planon_loggerscontrollers_intersected==master_loggerscontrollers_for_validation_intersected
(planon_loggerscontrollers_intersected==master_loggerscontrollers_for_validation_intersected).sum()
(planon_loggerscontrollers_intersected==master_loggerscontrollers_for_validation_intersected).sum()/\
len(planon_loggerscontrollers_intersected)*100
((planon_loggerscontrollers_intersected==master_loggerscontrollers_for_validation_intersected).sum()/\
len(planon_loggerscontrollers_intersected)*100).plot(kind='bar')
sum(planon_loggerscontrollers_intersected['Building Name']!=master_loggerscontrollers_for_validation_intersected['Building Name'])
planon_loggerscontrollers_intersected['Building Name']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ') for s in planon_loggerscontrollers_intersected['Building Name'].values]
master_loggerscontrollers_for_validation_intersected['Building Name']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ') for s in master_loggerscontrollers_for_validation_intersected['Building Name'].values]
sum(planon_loggerscontrollers_intersected['Building Name']!=master_loggerscontrollers_for_validation_intersected['Building Name'])
for i in planon_loggerscontrollers_intersected[planon_loggerscontrollers_intersected['Building Name']!=master_loggerscontrollers_for_validation_intersected['Building Name']].index:
print(i,'\t\tPlanon:',planon_loggerscontrollers_intersected.loc[i]['Building Name'],'\t\tMaster:',master_loggerscontrollers_for_validation_intersected.loc[i]['Building Name'])
sum(planon_loggerscontrollers_intersected['Logger Serial Number']!=master_loggerscontrollers_for_validation_intersected['Logger Serial Number'])
planon_loggerscontrollers_intersected['Logger Serial Number']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ').replace('{','').replace('}','') for s in planon_loggerscontrollers_intersected['Logger Serial Number'].values]
master_loggerscontrollers_for_validation_intersected['Logger Serial Number']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ').replace('{','').replace('}','') for s in master_loggerscontrollers_for_validation_intersected['Logger Serial Number'].values]
sum(planon_loggerscontrollers_intersected['Logger Serial Number']!=master_loggerscontrollers_for_validation_intersected['Logger Serial Number'])
for i in planon_loggerscontrollers_intersected[planon_loggerscontrollers_intersected['Logger Serial Number']!=master_loggerscontrollers_for_validation_intersected['Logger Serial Number']].index:
print(i,'\t\tPlanon:',planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number'],'\t\tMaster:',master_loggerscontrollers_for_validation_intersected.loc[i]['Logger Serial Number'])
z1=[]
z2=[]
for i in planon_loggerscontrollers_intersected.index:
    p = planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number']
    m = master_loggerscontrollers_for_validation_intersected.loc[i]['Logger Serial Number']
    if p != m:
        try:
            # Treat e.g. '1234.0' and '1234' as the same serial number
            if float(p) == float(m):
                p = m = str(int(float(p)))
        except ValueError:
            # Non-numeric serials stay as-is and will still show as mismatches
            pass
    z1.append(p)
    z2.append(m)
planon_loggerscontrollers_intersected['Logger Serial Number']=z1
master_loggerscontrollers_for_validation_intersected['Logger Serial Number']=z2
for i in planon_loggerscontrollers_intersected[planon_loggerscontrollers_intersected['Logger Serial Number']!=master_loggerscontrollers_for_validation_intersected['Logger Serial Number']].index:
print(i,'\t\tPlanon:',planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number'],'\t\tMaster:',master_loggerscontrollers_for_validation_intersected.loc[i]['Logger Serial Number'])
(planon_loggerscontrollers_intersected!=master_loggerscontrollers_for_validation_intersected).sum()/\
len(planon_loggerscontrollers_intersected)*100
a=np.sort(list(set(planon_meterssensors_filtered.index)))
b=np.sort(list(set(master_meterssensors_for_validation_filtered.index)))
meterssensors_not_in_planon=[]
for i in b:
if i not in a:
print(i+',',end=" "),
meterssensors_not_in_planon.append(i)
print('\n\nMeters in Master, but not in Planon:',
len(meterssensors_not_in_planon),'/',len(b),':',
round(len(meterssensors_not_in_planon)/len(b)*100,3),'%')
q1=pd.DataFrame(meterssensors_not_in_planon)
a=np.sort(list(set(planon_meterssensors_filtered.index)))
b=np.sort(list(set(master_meterssensors_for_validation_filtered.index)))
meterssensors_not_in_master=[]
for i in a:
if i not in b:
print(i+',',end=" "),
meterssensors_not_in_master.append(i)
print('\n\nMeters in Planon, not in Master:',
len(meterssensors_not_in_master),'/',len(a),':',
round(len(meterssensors_not_in_master)/len(a)*100,3),'%')
q2=pd.DataFrame(meterssensors_not_in_master)
a=np.sort(list(set(planon_loggerscontrollers_filtered.index)))
b=np.sort(list(set(master_loggerscontrollers_for_validation_filtered.index)))
loggerscontrollers_not_in_planon=[]
for i in b:
if i not in a:
print(i+',',end=" "),
loggerscontrollers_not_in_planon.append(i)
print('\n\nLoggers in Master, but not in Planon:',
len(loggerscontrollers_not_in_planon),'/',len(b),':',
round(len(loggerscontrollers_not_in_planon)/len(b)*100,3),'%')
q3=pd.DataFrame(loggerscontrollers_not_in_planon)
a=np.sort(list(set(planon_loggerscontrollers_filtered.index)))
b=np.sort(list(set(master_loggerscontrollers_for_validation_filtered.index)))
loggerscontrollers_not_in_master=[]
for i in a:
if i not in b:
print(i+',',end=" "),
loggerscontrollers_not_in_master.append(i)
print('\n\nLoggers in Planon, not in Master:',
len(loggerscontrollers_not_in_master),'/',len(a),':',
round(len(loggerscontrollers_not_in_master)/len(a)*100,3),'%')
q4=pd.DataFrame(loggerscontrollers_not_in_master)
q5=pd.DataFrame((planon_meterssensors_intersected!=master_meterssensors_for_validation_intersected).sum()/\
len(planon_meterssensors_intersected)*100)
q6=pd.DataFrame((planon_loggerscontrollers_intersected!=master_loggerscontrollers_for_validation_intersected).sum()/\
len(planon_loggerscontrollers_intersected)*100)
w1=[]
for i in planon_meterssensors_intersected[planon_meterssensors_intersected['Description']!=master_meterssensors_for_validation_intersected['Description']].index:
w1.append({"Meter":i,'Planon':planon_meterssensors_intersected.loc[i]['Description'],
'Master':master_meterssensors_for_validation_intersected.loc[i]['Description']})
q7=pd.DataFrame(w1)
w2=[]
for i in planon_loggerscontrollers_intersected[planon_loggerscontrollers_intersected['Building Name']!=master_loggerscontrollers_for_validation_intersected['Building Name']].index:
w2.append({"Logger":i,'Planon':planon_loggerscontrollers_intersected.loc[i]['Building Name'],
'Master':master_loggerscontrollers_for_validation_intersected.loc[i]['Building Name']})
q8=pd.DataFrame(w2)
writer = pd.ExcelWriter('final5b.xlsx')
q1.to_excel(writer,'Meters Master, not Planon')
q2.to_excel(writer,'Meters Planon, not Master')
q3.to_excel(writer,'Loggers Master, not Planon')
q4.to_excel(writer,'Loggers Planon, not Master')
q5.to_excel(writer,'Meters error perc')
q6.to_excel(writer,'Loggers error perc')
q7.to_excel(writer,'Meters naming conflcits')
q1
q9=[]
try:
for i in q1[0].values:
if i[:i.find('/')] not in set(q3[0].values):
q9.append(i)
except:pass
pd.DataFrame(q9).to_excel(writer,'Meters Master, not Planon, not Logger')
q10=[]
try:
for i in q1[0].values:
if 'L82' not in i:
q10.append(i)
except:pass
pd.DataFrame(q10).to_excel(writer,'Meters Master, not Planon, not L82')
q11=[]
try:
for i in q1[0].values:
if 'MC210' not in i:
q11.append(i)
except:pass
pd.DataFrame(q11).to_excel(writer,'Meters Master, not Planon, not 210')
writer.save()
test=[]
for i in planon_meterssensors_intersected.index:
test.append(i[:9])
planon_meterssensors_intersected['test']=test
planon_meterssensors_intersected.set_index(['test','Code'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Please Download
Step2: Dealing with NaN
Step3: Standardize features
Step4: Separating training data from testing data
Step5: Using Tensorflow
Step6: Defining a Linear Estimator
Step7: Training
Step8: Evaluating
Step9: Predicting
Step10: Defining a DNN Estimator
Step11: Training
Step12: Evaluating
Step13: Predicting
Step14: Creating an Experiment
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# We're using pandas to read the CSV file. This is easy for small datasets, but for large and complex datasets,
# tensorflow parsing and processing functions are more powerful
import pandas as pd
import numpy as np
# TensorFlow
import tensorflow as tf
print('please make sure that version >= 1.2:')
print(tf.__version__)
print('@monteirom: I made changes so it also works with 1.1.0 that is the current pip install version')
print('@monteirom: The lines that were changed have @1.2 as comment')
# Layers that will define the features
#
# real_value_column: real values, float32
# sparse_column_with_hash_bucket: Use this when your sparse features are in string or integer format,
# but you don't have a vocab file that maps each value to an integer ID.
# output_id = Hash(input_feature_string) % bucket_size
# sparse_column_with_keys: Look up logic is as follows:
# lookup_id = index_of_feature_in_keys if feature in keys else default_value.
# You should use this when you know the vocab file for the feature
# one_hot_column: Creates an _OneHotColumn for a one-hot or multi-hot repr in a DNN.
# The input can be a _SparseColumn which is created by `sparse_column_with_*`
# or crossed_column functions
from tensorflow.contrib.layers import real_valued_column, sparse_column_with_keys, sparse_column_with_hash_bucket
from tensorflow.contrib.layers import one_hot_column
# The CSV file does not have a header, so we have to fill in column names.
names = [
'symboling',
'normalized-losses',
'make',
'fuel-type',
'aspiration',
'num-of-doors',
'body-style',
'drive-wheels',
'engine-location',
'wheel-base',
'length',
'width',
'height',
'curb-weight',
'engine-type',
'num-of-cylinders',
'engine-size',
'fuel-system',
'bore',
'stroke',
'compression-ratio',
'horsepower',
'peak-rpm',
'city-mpg',
'highway-mpg',
'price',
]
# We also have to specify dtypes.
dtypes = {
'symboling': np.int32,
'normalized-losses': np.float32,
'make': str,
'fuel-type': str,
'aspiration': str,
'num-of-doors': str,
'body-style': str,
'drive-wheels': str,
'engine-location': str,
'wheel-base': np.float32,
'length': np.float32,
'width': np.float32,
'height': np.float32,
'curb-weight': np.float32,
'engine-type': str,
'num-of-cylinders': str,
'engine-size': np.float32,
'fuel-system': str,
'bore': np.float32,
'stroke': np.float32,
'compression-ratio': np.float32,
'horsepower': np.float32,
'peak-rpm': np.float32,
'city-mpg': np.float32,
'highway-mpg': np.float32,
'price': np.float32,
}
# Read the file.
df = pd.read_csv('data/imports-85.data', names=names, dtype=dtypes, na_values='?')
# Some rows don't have price data, we can't use those.
df = df.dropna(axis='rows', how='any', subset=['price'])
# Fill missing values in continuous columns with zeros instead of NaN.
float_columns = [k for k,v in dtypes.items() if v == np.float32]
df[float_columns] = df[float_columns].fillna(value=0., axis='columns')
# Fill missing values in continuous columns with '' instead of NaN (NaN mixed with strings is very bad for us).
string_columns = [k for k,v in dtypes.items() if v == str]
df[string_columns] = df[string_columns].fillna(value='', axis='columns')
# We have too many variables let's just use some of them
df = df[['num-of-doors','num-of-cylinders', 'horsepower', 'make', 'price', 'length', 'height', 'width']]
# Since we're possibly dealing with parameters of different units and scales. We'll need to rescale our data.
# There are two main ways to do it:
# * Normalization, which scales all numeric variables in the range [0,1].
# Example:
# * Standardization, it will then transform it to have zero mean and unit variance.
# Example:
# Which is better? It depends on your data and your features.
# But one disadvantage of normalization over standardization is that it loses
# some information in the data. Since normalization loses more info it can make it harder
# for gradient descent to converge, so we'll use standardization.
# In practice: please analyse your data and see what gives you better results.
def std(x):
return (x - x.mean()) / x.std()
before = df.length[0]
df.length = std(df.length)
df.width = std(df.width)
df.height = std(df.height)
df.horsepower = std(df.horsepower)
after = df.length[0]
print('before:', before, 'after:', after)
TRAINING_DATA_SIZE = 160
TEST_DATA_SIZE = 10
LABEL = 'price'
# Split the data into a training set, eval set and test set
training_data = df[:TRAINING_DATA_SIZE]
eval_data = df[TRAINING_DATA_SIZE: TRAINING_DATA_SIZE + TEST_DATA_SIZE]
test_data = df[TRAINING_DATA_SIZE + TEST_DATA_SIZE:]
# Separate input features from labels
training_label = training_data.pop(LABEL)
eval_label = eval_data.pop(LABEL)
test_label = test_data.pop(LABEL)
BATCH_SIZE = 64
# Make input function for training:
# num_epochs=None -> will cycle through input data forever
# shuffle=True -> randomize order of input data
training_input_fn = tf.estimator.inputs.pandas_input_fn(x=training_data,
y=training_label,
batch_size=BATCH_SIZE,
shuffle=True,
num_epochs=None)
# Make input function for evaluation:
# shuffle=False -> do not randomize input data
eval_input_fn = tf.estimator.inputs.pandas_input_fn(x=eval_data,
y=eval_label,
batch_size=BATCH_SIZE,
shuffle=False)
# Make input function for testing:
# shuffle=False -> do not randomize input data
test_input_fn = tf.estimator.inputs.pandas_input_fn(x=test_data,
y=test_label,
batch_size=1,
shuffle=False)
# Describe how the model should interpret the inputs. The names of the feature columns have to match the names
# of the series in the dataframe.
# @1.2.0 tf.feature_column.numeric_column -> tf.contrib.layers.real_valued_column
horsepower = real_valued_column('horsepower')
width = real_valued_column('width')
height = real_valued_column('height')
length = real_valued_column('length')
# @1.2.0 tf.feature_column.categorical_column_with_hash_bucket -> tf.contrib.layers.sparse_column_with_hash_bucket
make = sparse_column_with_hash_bucket('make', 50)
# @1.2.0 tf.feature_column.categorical_column_with_vocabulary_list -> tf.contrib.layers.sparse_column_with_keys
fuel_type = sparse_column_with_keys('fuel-type', keys=['diesel', 'gas'])
num_of_doors = sparse_column_with_keys('num-of-doors', keys=['two', 'four'])
num_of_cylinders = sparse_column_with_keys('num-of-cylinders', ['eight', 'five', 'four', 'six', 'three', 'twelve', 'two'])
linear_features = [horsepower, make, num_of_doors, num_of_cylinders, length, width, height]
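# Feature crosses (mentioned in the layer notes above) could be added the same
# way; a sketch that the models below do not use, and the hash_bucket_size of
# 1000 is an arbitrary assumption:
from tensorflow.contrib.layers import crossed_column
make_x_doors = crossed_column([make, num_of_doors], hash_bucket_size=1000)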
regressor = tf.contrib.learn.LinearRegressor(feature_columns=linear_features, model_dir='tensorboard/linear_regressor/')
regressor.fit(input_fn=training_input_fn, steps=10000)
regressor.evaluate(input_fn=eval_input_fn)
preds = list(regressor.predict(input_fn=test_input_fn))
for i in range(TEST_DATA_SIZE):
print('prediction:', preds[i], 'real value:', test_label.iloc[i])
# @1.2.0 tf.feature_column.indicator_column -> tf.contrib.layers.one_hot_column(tf.contrib.layers.sparse_column_with_keys(...))
dnn_features = [
#numerical features
length, width, height, horsepower,
# densify categorical features:
one_hot_column(make),
one_hot_column(num_of_doors)
]
dnnregressor = tf.contrib.learn.DNNRegressor(feature_columns=dnn_features,
hidden_units=[50, 30, 10], model_dir='tensorboard/DNN_regressor/')
dnnregressor.fit(input_fn=training_input_fn, steps=10000)
dnnregressor.evaluate(input_fn=eval_input_fn)
preds = list(dnnregressor.predict(input_fn=test_input_fn))
for i in range(TEST_DATA_SIZE):
print('prediction:', preds[i], 'real value:', test_label.iloc[i])
# @1.2.0 experiment_fn(run_config, params) - > experiment_fn(output_dir)
def experiment_fn(output_dir):
# This function makes an Experiment, containing an Estimator and inputs for training and evaluation.
# You can use params and config here to customize the Estimator depending on the cluster or to use
# hyperparameter tuning.
# Collect information for training
# @1.2.0 config=run_config -> ''
return tf.contrib.learn.Experiment(estimator=tf.contrib.learn.LinearRegressor(
feature_columns=linear_features, model_dir=output_dir),
train_input_fn=training_input_fn,
train_steps=10000,
eval_input_fn=eval_input_fn)
import shutil
# @1.2.0 tf.contrib.learn.learn_runner(exp, run_config=tf.contrib.learn.RunConfig(model_dir="/tmp/output_dir")
# -> tf.contrib.learn.python.learn.learm_runner.run(exp, output_dir='/tmp/output_dir')
shutil.rmtree("/tmp/output_dir", ignore_errors=True)
from tensorflow.contrib.learn.python.learn import learn_runner
learn_runner.run(experiment_fn, output_dir='/tmp/output_dir')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This simple function takes a value, adds 2 to it, and returns that new value.
Step2: Then you can set the inputs and run just as you would with any other interface
Step3: You need to be careful that the name of the input parameter to the node is the same name as the input parameter to the function, i.e. x_input. But you don't have to specify input_names or output_names. You can also just use
Step4: Using External Packages
Step5: Without explicitly importing Nibabel in the body of the function, this would fail.
Step6: Unlike when using a function object, this input can be set like any other, meaning that you could write a function that outputs different function strings depending on some run-time contingencies, and connect that output to the function_str input of a downstream Function interface.
Step7: Now, let's see what happens if we move the import of random outside the scope of get_random_array
|
<ASSISTANT_TASK:>
Python Code:
# Create a small example function
def add_two(x_input):
return x_input + 2
# Import Node and Function module
from nipype import Node, Function
# Create Node
addtwo = Node(Function(input_names=["x_input"],
output_names=["val_output"],
function=add_two),
name='add_node')
addtwo.inputs.x_input = 4
addtwo.run()
addtwo.result.outputs
addtwo = Node(Function(function=add_two), name='add_node')
addtwo.inputs.x_input = 8
addtwo.run()
addtwo.result.outputs
def get_n_trs(in_file):
import nibabel
f = nibabel.load(in_file)
return f.shape[-1]
add_two_str = "def add_two(val):\n return val + 2\n"
from nipype import Node, Function
# Create the Function object
def get_random_array(array_shape):
# Import random function
from numpy.random import random
return random(array_shape)
# Create Function Node that executes get_random_array
rndArray = Node(Function(input_names=["array_shape"],
output_names=["random_array"],
function=get_random_array),
name='rndArray_node')
# Specify the array_shape of the random array
rndArray.inputs.array_shape = (3, 3)
# Run node
rndArray.run()
# Print output
print(rndArray.result.outputs)
from nipype import Node, Function
# Import random function
from numpy.random import random
# Create the Function object
def get_random_array(array_shape):
return random(array_shape)
# Create Function Node that executes get_random_array
rndArray = Node(Function(input_names=["array_shape"],
output_names=["random_array"],
function=get_random_array),
name='rndArray_node')
# Specify the array_shape of the random array
rndArray.inputs.array_shape = (3, 3)
# Run node
try:
rndArray.run()
except Exception as err:
print(err)
else:
raise
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the data
Step2: Store the result of dropping the label column from the dataframe as the input data $X$. In order to preserve the number of training examples, impute the missing values as the average values of their respective column from the input matrix.
Step3: Model Description
Step4: Model Description
Step5: Model Description
Step6: Why is there so much overlap between the two sets? Need more features? Don't normalize the data?
|
<ASSISTANT_TASK:>
Python Code:
import os
import pandas as pd
import time
from tqdm import tqdm
import glob
import numpy as np
from utils.input_pipeline import *
#TODO: Try the variational approach instead.
#TODO: instead of solving for a small dimension representation, then training a RF on this...make the dims larger and try a linear model instead?
# Imports used inside data_gen (assumed not to be re-exported by utils.input_pipeline)
import h5py
import random
from sklearn.preprocessing import Imputer, Normalizer
from keras.utils import to_categorical

def data_gen(file_path, batch_steps, categorical=False, sample_size=None, features_list=None, mode=None, conformation=None):
#decide upon receptor versus protein for naming conventions
receptor_list = list(h5py.File(file_path,'r'))
while(1):
random.shuffle(receptor_list)
X,y = load_protein(file_path, protein_name=receptor_list[0], sample_size=None,
features_list=features_list,mode=mode, conformation=conformation)
X = Normalizer().fit_transform(Imputer(strategy="median").fit_transform(np.nan_to_num(X)))
y = y.flatten()
positives = X[y==1,:]
negatives = X[y==0,:]
for step in range(batch_steps):
negatives_to_keep = np.random.choice(negatives.shape[0],sample_size,replace = True)
X_batch = np.vstack((negatives[negatives_to_keep],positives))
X_batch = np.vstack((X_batch,positives))
y_batch = np.hstack((y[y==0][negatives_to_keep],y[y==1]))
y_batch = np.hstack((y_batch,y[y==1]))
if categorical is True:
yield X_batch, to_categorical(y_batch)
else:
yield X_batch, y_batch
with open("data/preprocessed_features.csv", "r") as input_file:
feature_list = []
for line in input_file:
line = line.strip('\n')
feature_list.append(line)
print(len(feature_list))
print("loading data...")
t0 = time.time()
X,y = load_data("data/full_26_kinase_data.h5",features_list=feature_list)
t1 = time.time()
print("data loaded in",(t1-t0),"seconds.")
from sklearn.preprocessing import Imputer
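# Imputer defaults to strategy='mean', i.e. the column averages described above.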
X = Imputer().fit_transform(X)
from sklearn.preprocessing import normalize
X = normalize(X)
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2)
import tensorflow as tf
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, Dropout
from keras.layers.advanced_activations import PReLU
from keras import optimizers, objectives, regularizers
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import mean_squared_error
K.set_session(tf.Session(config=tf.ConfigProto(inter_op_parallelism_threads=10,
intra_op_parallelism_threads=10)))
num_epochs = 10
encoding_dim = 460
learning_rate = 1e-3
input_data = Input(shape=(X.shape[1],))
alpha = 'glorot_uniform'
encoded = Dense(int(np.ceil(X.shape[1]/2)),kernel_regularizer=regularizers.l2(0.01))(input_data)
encoded = PReLU(alpha_initializer=alpha)(encoded)
encoded = Dense(int(np.ceil(X.shape[1]/16)))(encoded)
encoded = PReLU(alpha_initializer=alpha)(encoded)
encoded = Dense(int(np.ceil(X.shape[1]/16)))(encoded)
encoded = PReLU(alpha_initializer=alpha)(encoded)
encoded = Dense(encoding_dim)(encoded)
encoded = Dropout(0.5)(encoded)
encoded = PReLU(alpha_initializer=alpha)(encoded)
decoded = Dense(int(np.ceil(X.shape[1]/16)))(encoded)
decoded = PReLU(alpha_initializer=alpha)(decoded)
decoded = Dense(int(np.ceil(X.shape[1]/16)))(decoded)
decoded = PReLU(alpha_initializer=alpha)(decoded)
decoded = Dense(int(np.ceil(X.shape[1]/16)))(decoded)
decoded = PReLU(alpha_initializer=alpha)(decoded)
decoded = Dense(X.shape[1])(decoded)
encoder = Model(input_data, encoded)
print (encoder.summary())
autoencoder = Model(input_data, decoded)
print (autoencoder.summary())
encoded_input = Input(shape=(encoding_dim,))
decoder = Model(encoded_input,autoencoder.layers[-1](autoencoder.layers[-2](autoencoder.layers[-3](autoencoder.layers[-4](
autoencoder.layers[-5](autoencoder.layers[-6](autoencoder.layers[-7](autoencoder.layers[-8](encoded_input)))))))))
print (decoder.summary())
autoencoder.compile(optimizer=optimizers.adam(lr=1e-4),loss=objectives.mean_squared_error,metrics=['mse'])
from keras.callbacks import TensorBoard
batch_size = 10
autoencoder.fit(X_train,X_train,epochs=1,batch_size=batch_size,shuffle=True,verbose=1,callbacks=[TensorBoard(log_dir='logs/'+str(time.time()), histogram_freq=1, write_graph=True, write_grads=True)])
#with K.get_session():
#still need to use train/test splits to properly evaluate performance
enc_red_x = encoder.predict(X_test)
enc_recon_x = decoder.predict(enc_red_x)
enc_error = mean_squared_error(X_test,enc_recon_x)
print("Autoencoder Reconstruction Error: ",enc_error)
import matplotlib.pyplot as plt
#from scipy.stats import gaussian_kde
import seaborn as sns
sns.kdeplot(enc_red_x.flatten()[y_test.flatten()==0],shade=True, color="r")
sns.kdeplot(enc_red_x.flatten()[y_test.flatten()==1],shade=True, color="b")
plt.show()
output_features = pd.DataFrame({"features":encoder.predict(X),"labels":y})
print(output_features.shape)
#output_features.to_csv("preprocessed_features_autoencoder.csv",header=True,index=False)
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, f1_score
enc_x_train,enc_x_test,enc_y_train,enc_y_test = train_test_split(encoder.predict(X),y.flatten(),stratify=y.flatten()
,test_size=0.2)
train_f1_list = []
test_f1_list = []
f1_list = []
for i in range(1,20):
rforest= RandomForestClassifier(n_estimators = i,n_jobs=10)
rforest.fit(enc_x_train,enc_y_train)
enc_preds_train = rforest.predict(enc_x_train)
enc_preds_test = rforest.predict(enc_x_test)
test_f1_list.append(f1_score(enc_preds_test,enc_y_test))
train_f1_list.append(f1_score(enc_preds_train,enc_y_train))
import matplotlib.pyplot as plt
plt.plot(train_f1_list,label="train_f1")
plt.plot(test_f1_list,label="test_f1")
plt.show()
enc_preds_train = rforest.predict(enc_x_train)
enc_accuacy_train = accuracy_score(enc_preds_train,enc_y_train)
enc_f1_train = f1_score(enc_preds_train,enc_y_train)
print("train\t enc_accuracy",enc_accuacy_train,"\tenc_f1-score",enc_f1_train)
enc_preds_test = rforest.predict(enc_x_test)
enc_accuacy_test = accuracy_score(enc_preds_test,enc_y_test)
enc_f1_test = f1_score(enc_preds_test,enc_y_test)
print("test\t enc_accuracy",enc_accuacy_test,"\tenc_f1-score",enc_f1_test)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then, read the (sample) input tables for blocking purposes.
Step2: Generating Features for Blocking
Step3: Different Ways to Block Using Rule Based Blocker
Step4: Block Candidate Set
Step5: Block Two tuples To Check If a Tuple Pair Would Get Blocked
|
<ASSISTANT_TASK:>
Python Code:
# Import py_entitymatching package
import py_entitymatching as em
import os
import pandas as pd
# Get the datasets directory
datasets_dir = em.get_install_path() + os.sep + 'datasets'
# Get the paths of the input tables
path_A = datasets_dir + os.sep + 'person_table_A.csv'
path_B = datasets_dir + os.sep + 'person_table_B.csv'
# Read the CSV files and set 'ID' as the key attribute
A = em.read_csv_metadata(path_A, key='ID')
B = em.read_csv_metadata(path_B, key='ID')
A.head()
B.head()
block_f = em.get_features_for_blocking(A, B, validate_inferred_attr_types=False)
block_f
em._block_c['corres']
em._atypes1['birth_year'], em._atypes1['hourly_wage'], em._atypes1['name'], em._atypes1['zipcode']
em._atypes2['birth_year'], em._atypes2['hourly_wage'], em._atypes2['name'], em._atypes2['zipcode']
rb = em.RuleBasedBlocker()
# Add rule : block tuples if name_name_lev(ltuple, rtuple) < 0.4
rb.add_rule(['name_name_lev_sim(ltuple, rtuple) < 0.4'], block_f)
C = rb.block_tables(A, B, l_output_attrs=['name', 'address'], r_output_attrs=['name', 'address'], show_progress=False)
C.head()
rb = em.RuleBasedBlocker()
rb.add_rule(['birth_year_birth_year_exm(ltuple, rtuple) == 0'], block_f)
D = rb.block_candset(C, show_progress=False)
D.head()
A.loc[[0]]
B.loc[[1]]
rb = em.RuleBasedBlocker()
# Add rule : block tuples if name_name_lev(ltuple, rtuple) < 0.4
rb.add_rule(['name_name_lev_sim(ltuple, rtuple) < 0.4'], block_f)
rb.add_rule(['birth_year_birth_year_exm(ltuple, rtuple) == 0'], block_f)
status = rb.block_tuples(A.loc[0], B.loc[0])
print(status)
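# To check whether the rules are dropping true matches, py_entitymatching also
# ships a blocker debugger (a sketch; output_size here is an arbitrary choice):
dbg = em.debug_blocker(C, A, B, output_size=20)
dbg.head()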
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: DB migration/setup
Step8: Processing
Step9: Results
|
<ASSISTANT_TASK:>
Python Code:
import psycopg2
from psycopg2.extras import RealDictCursor
import pandas as pd
# import geopandas as gpd
# from shapely import wkb
# from shapely.geometry import mapping as to_geojson
# import folium
pd.options.display.max_columns = None
pd.options.display.max_rows = None
#pd.set_option('display.float_format', lambda x: '%.3f' % x)
%matplotlib inline
conn = psycopg2.connect('service=firecares')
nfirs = psycopg2.connect('service=nfirs')
# Create materialized view of all usgs govt units in FireCARES
q = """
create materialized view if not exists usgs_governmentunits as
(
select id, population, county_name as name, 'countyorequivalent' as source, geom from usgs_countyorequivalent where geom is not null
union
select id, population, place_name as name, 'incorporatedplace' as source, geom from usgs_incorporatedplace where geom is not null
union
select id, population, minorcivildivision_name as name, 'minorcivildivision' as source, geom from usgs_minorcivildivision where geom is not null
union
select id, population, name, 'nativeamericanarea' as source, geom from usgs_nativeamericanarea where geom is not null
union
select id, 0 as population, name, 'reserve' as source, geom from usgs_reserve where geom is not null
union
select id, population, state_name as name, 'stateorterritoryhigh' as source, geom from usgs_stateorterritoryhigh where geom is not null
union
select id, population, place_name as name, 'unincorporatedplace' as source, geom from usgs_unincorporatedplace where geom is not null
);
create unique index on usgs_governmentunits (id, source);
create index on usgs_governmentunits using gist (geom);
"""
with conn.cursor() as c:
c.execute(q)
conn.commit()
# Link remote firecares usgs_governmentunits view to nfirs-local usgs_government units
q = """
create foreign table usgs_governmentunits (id integer, population integer, name character varying(120), source text, geom geometry)
server firecares
options (table_name 'usgs_governmentunits');
"""
with nfirs.cursor() as c:
c.execute(q)
nfirs.commit()
# Old nfirs.firestation_firedepartment foreign table columns needed to be synced
q = """
alter foreign TABLE firestation_firedepartment add column archived boolean NOT NULL;
alter foreign TABLE firestation_firedepartment add column domain_name character varying(255);
alter foreign TABLE firestation_firedepartment add column owned_tracts_geom public.geometry(MultiPolygon,4326);
alter foreign TABLE firestation_firedepartment add column display_metrics boolean NOT NULL;
alter foreign TABLE firestation_firedepartment add column boundary_verified boolean NOT NULL;
alter foreign TABLE firestation_firedepartment add column cfai_accredited boolean NOT NULL;
alter foreign TABLE firestation_firedepartment add column ems_transport boolean NOT NULL;
alter foreign TABLE firestation_firedepartment add column staffing_verified boolean NOT NULL;
alter foreign TABLE firestation_firedepartment add column stations_verified boolean NOT NULL;
alter foreign TABLE firestation_firedepartment add column census_override boolean NOT NULL;
alter foreign TABLE firestation_firedepartment add column additional_fdids character varying(255);
"""
with nfirs.cursor() as c:
c.execute(q)
nfirs.commit()
q = """
create foreign table if not exists firecares_core_address (id integer NOT NULL,
address_line1 character varying(100) NOT NULL,
address_line2 character varying(100),
city character varying(50) NOT NULL,
state_province character varying(40) NOT NULL,
postal_code character varying(10) NOT NULL,
geom public.geometry(Point,4326),
geocode_results text,
country_id character varying(2) NOT NULL)
server firecares
options (table_name 'firecares_core_address');
"""
with nfirs.cursor() as c:
c.execute(q)
nfirs.commit()
q = """
select id, fdid, state, name
from firestation_firedepartment
where boundary_verified = true;
"""
with nfirs.cursor(cursor_factory=RealDictCursor) as c:
c.execute(q)
fds = c.fetchall()
q = """
with fires as (select * from joint_buildingfires
inner join joint_incidentaddress
using (fdid, inc_no, inc_date, state, exp_no)
where state = %(state)s and fdid = %(fdid)s
),
govt_units as (
select gu.name, gu.source, gu.id, gu.geom, fd.id as fc_id, fd.geom as fd_geom, ST_Distance(addr.geom, ST_Centroid(gu.geom)) as distance_to_headquarters
from firestation_firedepartment fd
inner join firecares_core_address addr
on addr.id = fd.headquarters_address_id
join usgs_governmentunits gu
on ST_Intersects(ST_Buffer(addr.geom, 0.05), gu.geom)
where
fd.fdid = %(fdid)s and fd.state = %(state)s and source != 'stateorterritoryhigh'
)
select gu.fc_id, count(fires) / ST_Area(gu.geom) as density, count(fires), ST_Area(ST_SymDifference(gu.fd_geom, gu.geom)) / ST_Area(gu.fd_geom) as percent_difference_to_verified_boundary, ST_Area(gu.geom), gu.distance_to_headquarters, gu.name, gu.id, gu.source from fires
inner join govt_units gu
on ST_Intersects(fires.geom, gu.geom)
group by gu.name, gu.id, gu.geom, gu.source, gu.distance_to_headquarters, gu.fd_geom, gu.fc_id
order by ST_Area(gu.geom) / count(fires) asc;
"""
for fd in fds:
with nfirs.cursor(cursor_factory=RealDictCursor) as c:
print 'Analyzing: {} (id: {} fdid: {} {})'.format(fd['name'], fd['id'], fd['fdid'], fd['state'])
c.execute(q, dict(fdid=fd['fdid'], state=fd['state']))
items = c.fetchall()
df = pd.DataFrame(items)
df.to_csv('./boundary-analysis-{}.csv'.format(fd['id']))
from glob import glob
df = None
for f in glob("boundary-analysis*.csv"):
if df is not None:
df = df.append(pd.read_csv(f))
else:
df = pd.read_csv(f)
df.rename(columns={'Unnamed: 0': 'rank'}, inplace=True)
selected_government_units = df[df['rank'] == 0].set_index('fc_id')
total_validated_department_count = len(selected_government_units)
perfect_fits = len(selected_government_units[selected_government_units['percent_difference_to_verified_boundary'] == 0])
print 'Perfect fits: {}/{} ({:.2%})'.format(perfect_fits, total_validated_department_count, float(perfect_fits) / total_validated_department_count)
print 'Machine-selected government unit area difference mean: {:.2%}'.format(df[df['rank'] == 0].percent_difference_to_verified_boundary.mean())
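# A supplementary cut (assumption: percent_difference_to_verified_boundary is a
# fraction, so 0.10 means a 10% area mismatch between the selected unit and the
# verified boundary):
near_fits = (selected_government_units['percent_difference_to_verified_boundary'] < 0.10).sum()
print 'Near fits (<10% difference): {}/{} ({:.2%})'.format(near_fits, total_validated_department_count, float(near_fits) / total_validated_department_count)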
selected_government_units['percent_difference_to_verified_boundary'].hist(bins=50)
selected_government_units
df.set_index('fc_id')
df.to_csv('./validated-boundary-vs-government-unit-density.csv')
pd.read_csv('./validated-boundary-vs-government-unit-density.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This notebook is about ways to model data using analytic distributions. I start with the exponential distribution, which is often a good model of time between random arrivals.
Step2: Here's the babyboom data
Step3: And here's the CDF of interarrival times.
Step4: Visually it looks like an exponential CDF, but there are other analytic distributions that also look like this. A stronger test is to plot the complementary CDF, that is $1-CDF(x)$ on a log-y scale.
Step5: It is not exactly straight, which indicates that the exponential distribution is not a perfect model for this data. Most likely the underlying assumption—that a birth is equally likely at any time of day—is not exactly true. Nevertheless, it might be reasonable to model this dataset with an exponential distribution.
Step6: We might expect the distribution of birth weights to be approximately normal. I'll load data from the NSFG again
Step7: We can estimate the parameters of the normal distribution, mu and sigma, then plot the data on top of the analytic model
Step9: The data fit the model well, but there are some deviations in the lower tail.
Step10: If the data are normal, the result should be a straight line.
Step11: The normal probability plot shows that the lightest babies are lighter than expected, starting about two standard deviations below the mean. Also, the heaviest babies are heavier than the model predicts.
Step12: As expected, the normal model is a better fit for full-term babies at the low end of the distribution.
Step14: This function generates normal probability plots
Step15: The normal distribution is a poor model for the distribution of adult weights.
Step16: But if we compute the log of adult weights, the normal distribution is much better.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
import analytic
import brfss
import nsfg
import thinkstats2
import thinkplot
import pandas as pd
import numpy as np
import math
%matplotlib inline
thinkplot.PrePlot(3)
for lam in [2.0, 1, 0.5]:
xs, ps = thinkstats2.RenderExpoCdf(lam, 0, 3.0, 50)
label = r'$\lambda=%g$' % lam
thinkplot.Plot(xs, ps, label=label)
thinkplot.Config(title='Exponential CDF',
xlabel='x',
ylabel='CDF',
loc='lower right')
df = analytic.ReadBabyBoom()
df
diffs = df.minutes.diff()
cdf = thinkstats2.Cdf(diffs, label='actual')
thinkplot.PrePlot(1)
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel='minutes',
ylabel='CDF',
legend=False)
thinkplot.PrePlot(1)
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(xlabel='minutes',
ylabel='CCDF',
yscale='log',
legend=False)
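# A quick parameter estimate (a sketch; assumes the interarrival times are in
# minutes): the maximum-likelihood rate for an exponential is 1 / sample mean.
lam_hat = 1.0 / diffs.mean()
print('estimated lambda: %.3f births per minute' % lam_hat)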
thinkplot.PrePlot(3)
mus = [1.0, 2.0, 3.0]
sigmas = [0.5, 0.4, 0.3]
for mu, sigma in zip(mus, sigmas):
xs, ps = thinkstats2.RenderNormalCdf(mu=mu, sigma=sigma,
low=-1.0, high=4.0)
label = r'$\mu=%g$, $\sigma=%g$' % (mu, sigma)
thinkplot.Plot(xs, ps, label=label)
thinkplot.Config(title='Normal CDF',
xlabel='x',
ylabel='CDF',
loc=2)
preg = nsfg.ReadFemPreg()
weights = preg.totalwgt_lb.dropna()
mu, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
print('Mean, Var', mu, var)
# plot the model
sigma = math.sqrt(var)
print('Sigma', sigma)
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=12.5)
thinkplot.Plot(xs, ps, label='model', color='orange')
# plot the data
cdf = thinkstats2.Cdf(weights, label='data')
thinkplot.PrePlot(1)
thinkplot.Cdf(cdf)
thinkplot.Config(title='Birth weights',
xlabel='birth weight (lbs)',
ylabel='CDF',
legend=True)
def NormalProbability(ys, jitter=0.0):
"""Generates data for a normal probability plot.
ys: sequence of values
jitter: float magnitude of jitter added to the ys
returns: numpy arrays xs, ys
"""
n = len(ys)
xs = np.random.normal(0, 1, n)
xs.sort()
if jitter:
ys = Jitter(ys, jitter)
else:
ys = np.array(ys)
ys.sort()
return xs, ys
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = math.sqrt(var)
xlim = [-4.5, 4.5]
fxs, fys = thinkstats2.FitLine(xlim, mean, std)
thinkplot.Plot(fxs, fys, linewidth=4, color='0.8')
thinkplot.PrePlot(2)
xs, ys = NormalProbability(weights)
thinkplot.Plot(xs, ys, label='all live')
thinkplot.Config(title='Normal probability plot',
xlabel='Standard deviations from mean',
ylabel='Birth weight (lbs)',
legend=True, loc='lower right',
xlim=xlim)
full_term = preg[preg.prglngth >= 37]
term_weights = full_term.totalwgt_lb.dropna()
thinkplot.Plot(fxs, fys, linewidth=4, color='0.8')
thinkplot.PrePlot(2)
xs, ys = NormalProbability(weights)
thinkplot.Plot(xs, ys, label='all live')
xs, ys = NormalProbability(term_weights)
thinkplot.Plot(xs, ys, label='full term')
thinkplot.Config(title='Normal probability plot',
xlabel='Standard deviations from mean',
ylabel='Birth weight (lbs)',
legend=True, loc='lower right',
xlim=xlim)
df = brfss.ReadBrfss()
weights = df.wtkg2.dropna()
log_weights = np.log10(weights)
def MakeNormalPlot(weights):
"""Generates a normal probability plot of birth weights.
weights: sequence
"""
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = math.sqrt(var)
xs = [-5, 5]
xs, ys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(xs, ys, color='0.8', label='model')
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label='weights')
MakeNormalPlot(weights)
thinkplot.Config(xlabel='z', ylabel='weights (kg)')
MakeNormalPlot(log_weights)
thinkplot.Config(xlabel='z', ylabel='weights (log10 kg)')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, it is time to convert dates from 'object' to 'date' format
Step2: Status of the flight
Step3: The result, so far
Step4: Some EDA (tests)
Step5: 2.2 - Merge Flights with local airports (list with all the ~600 Brazilian public airports) and identify the local flights
Step6: --- Here I got stuck ---
Step7: Moving on...
|
<ASSISTANT_TASK:>
Python Code:
from datetime import datetime
# Pandas and NumPy
import pandas as pd
import numpy as np
# Read the CSV with flights records (separation = ";")
flights = pd.read_csv('data/arfsample-date.csv', sep = ';', dtype = str)
flights.head()
# Lambda function
# 1 - Used to adjust date columns to ISO format
# flights['column-with-date'] = flights[['column-with-date']].apply(lambda row: \
# row.str.replace("(?P<day>\d{2})/\
# (?P<month>\d{2})/\
# (?P<year>\d{4}) \
# (?P<HOUR>\d{2}):\
# (?P<MIN>\d{2})", \
# "\g<year>/\g<month>/\g<day> \g<HOUR>:\g<MIN>:00"), axis=1)
#
# 2- Used to translate to english the 'flight-status':
# - 'REALIZADO'-> 'ACCOMPLISHED'
# - 'CANCELADO'-> 'CANCELED'
# ------------------------------------------------------------------------------
flights['departure-est'] = flights[['departure-est']].apply(lambda row: row.str.replace("(?P<day>\d{2})/(?P<month>\d{2})/(?P<year>\d{4}) (?P<HOUR>\d{2}):(?P<MIN>\d{2})", "\g<year>/\g<month>/\g<day> \g<HOUR>:\g<MIN>:00"), axis=1)
flights['departure-real'] = flights[['departure-real']].apply(lambda row: row.str.replace("(?P<day>\d{2})/(?P<month>\d{2})/(?P<year>\d{4}) (?P<HOUR>\d{2}):(?P<MIN>\d{2})", "\g<year>/\g<month>/\g<day> \g<HOUR>:\g<MIN>:00"), axis=1)
flights['arrival-est'] = flights[['arrival-est']].apply(lambda row: row.str.replace("(?P<day>\d{2})/(?P<month>\d{2})/(?P<year>\d{4}) (?P<HOUR>\d{2}):(?P<MIN>\d{2})", "\g<year>/\g<month>/\g<day> \g<HOUR>:\g<MIN>:00"), axis=1)
flights['arrival-real'] = flights[['arrival-real']].apply(lambda row: row.str.replace("(?P<day>\d{2})/(?P<month>\d{2})/(?P<year>\d{4}) (?P<HOUR>\d{2}):(?P<MIN>\d{2})", "\g<year>/\g<month>/\g<day> \g<HOUR>:\g<MIN>:00"), axis=1)
flights.dtypes
# Departure and Arrival columns: from 'object' to 'date' format
flights['departure-est'] = pd.to_datetime(flights['departure-est'], errors='ignore')
flights['departure-real'] = pd.to_datetime(flights['departure-real'], errors='ignore')
flights['arrival-est'] = pd.to_datetime(flights['arrival-est'], errors='ignore')
flights['arrival-real'] = pd.to_datetime(flights['arrival-real'], errors='ignore')
flights.dtypes
# translate the flight status from portuguese to english
flights['flight-status'] = flights[['flight-status']].apply(lambda row: row.str.replace("REALIZADO", "ACCOMPLISHED"), axis=1)
flights['flight-status'] = flights[['flight-status']].apply(lambda row: row.str.replace("CANCELADO", "CANCELED"), axis=1)
flights[['departure-est', 'departure-real', 'arrival-est', 'arrival-real', 'flight-status']].head()
flights.groupby(['airport-A']).size().reset_index(name='number_departures')
# Airports dataset: all Brazilian public airports (updated until October 2017)
airports = pd.read_csv('data/brazilianPublicAirports-out2017.csv', sep = ';')
airports.head()
# Merge "flights" dataset with "airports" in order to identify
# local flights (origin and destination are in Brazil)
flights = pd.merge(flights, airports, left_on="airport-A", right_on="airport", how='left')
flights = pd.merge(flights, airports, left_on="airport-B", right_on="airport", how='left')
flights.head()
# Create a new column 'local_flight' with '0' value as default
flights["local_flight"] = 0
flights.airport_x = flights.airport_x.fillna('')
flights.airport_y = flights.airport_y.fillna('')
flights[["airport_x", "airport_y", "local_flight"]]
for index,row in flights.iterrows():
if ((row['airport_x'] != '') & (row['airport_y'] != '')):
flights.loc[index, 'local_flight'] = 1  # flag only this row; assigning to the whole column marked every flight as local
flights[['airport_x', 'airport_y', 'local_flight']]
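# A vectorized equivalent of the loop above (assumption: an empty string means the
# airport was not found in the public-airport list):
flights['local_flight'] = ((flights['airport_x'] != '') & (flights['airport_y'] != '')).astype(int)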
# Weather sample: load the CSV with weather historical data (from Campinas, SP, Brazil, 2016)
weather = pd.read_csv('data/DataScience-Intensive-weatherAtCampinasAirport-2016-Campinas_Airport_2016Weather.csv', \
sep = ',', dtype = str)
weather.head()
weather.dtypes
weather["date"] = weather["year"].map(str) + "-" + weather["month"].map(str) + "-" + weather["day"].map(str)
weather["date"] = pd.to_datetime(weather['date'],errors='ignore')
weather.dtypes
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You create a spectrum object by passing in the flux and the dispersion values. As positional arguments, flux comes first. If your dispersion axis has been wavelength calibrated (it's a wavelength rather than pixel positions) then you also need to pass in the calibrated=True keyword.
Step2: If you need access to the dispersion axis or flux, you can get them via the xaxis and flux attributes.
Step3: Spectrum has a wav_select method which allows you to select a section of your spectrum between a lower and upper dispersion coordinate (pixel/wavelength).
Step4: Overloaded Operators
Step6: Practical Example - Telluric Correction
Step7: Now the telluric model spans a much wider wavelength range than we have observed.
Step8: There can be a difference in airmass between the observation and the model, which affects the line depth.
Step9: Now we want to correct for the atmospheric absorption by dividing the observation by the telluric model.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
from spectrum_overload import Spectrum
from astropy.io import fits
import copy
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(2000, 2050)
y = np.random.rand(len(x))
spec_uncalibrated = Spectrum(y, x)
spec_calibrated = Spectrum(flux=y, xaxis=x, calibrated=True)
wavelength = spec_uncalibrated.xaxis
I = spec_uncalibrated.flux
#spec_calibrated.wav_select(lower, upper)
spec_calibrated.wav_select(2030, 2070)
spec1 = Spectrum(y, x, calibrated=True)
spec2 = copy.copy(spec1) # Duplicate to easily see result. mainly spec1-spec2=0, spec1/spec2=1
# this is still to operate.
add = spec1 + spec2
subtract = spec1 - spec2
multiply = spec1 * spec2
divide = spec1 / spec2
print("add", add.flux)
print("subtract", subtract.flux)
print("multiply", multiply.flux)
print("divide", divide.flux)
# A function to read header and data from tapas ipac file.
def load_telluric(filename):
"""Returns telluric data and header.
If you just want the data then call as load_telluric()[0]
or data, __ = load_telluric();
likewise just the header as hdr = load_telluric()[1].
"""
ext = filename.split(".")[-1]
if ext == "ipac":
tell_hdr = fits.Header()
with open(filename) as f:
col1 = []
col2 = []
for line in f:
#firstchar = line[0]
#print("first char =", firstchar)
if line.startswith("\\"):
# Get the Tapas Header
line = line[1:] # remove the leading \
line = line.strip()
items = line.split("=")
tell_hdr[items[0]] = items[1] # Add to header
elif line.startswith("|"):
# Obtian wavelength scale from piped lines
if "in air" in line:
tell_hdr["WAVSCALE"] = "air"
elif "nm|" in line:
tell_hdr["WAVSCALE"] = "vacuum"
# Need extra condition to deal with wavenumber
else:
line = line.strip()
val1, val2 = line.split()
col1.append(float(val1))
col2.append(float(val2))
elif ext == "fits":
i_tell = fits.getdata(filename, 1)
tell_hdr = fits.getheader(filename, 1)
# TODO ... Need to get wavelenght scale (air/wavelenght) from fits file somehow...
col1 = i_tell["wavelength"]
col2 = i_tell["transmittance"]
else:
print(" Could not load file", filename," with extention", ext)
return None
# put in ascending order
if col1[-1]-col1[0] < 0: # wl is backwards
col1 = col1[::-1]
col2 = col2[::-1]
tell_data = np.array([col1, col2], dtype="float64")
return tell_data, tell_hdr
## Load in the spectra
# Using already wavelength calibrated slectrum
obsname = "../spectrum_overload/data/spec_wavecal.fits"
tellname = "../spectrum_overload/data/telluric_data.ipac"
# Load in the data
obs_data = fits.getdata(obsname)
obs_hdr = fits.getheader(obsname)
telluric_data, telluric_header = load_telluric(tellname)
## Put data into Spectrum object
# Can check for column names using print(obs.columns)
# "Wavelength" and "Extracted_DRACS" are the columns of the fits table
observation = Spectrum(obs_data["Extracted_DRACS"], obs_data["Wavelength"], calibrated=True, header=obs_hdr)
telluric = Spectrum(telluric_data[1], telluric_data[0], calibrated=True, header=telluric_header)
# Shorten telluric spectra to just around our observation (1 nm either side)
print("Before ", len(telluric))
telluric.wav_select(min(observation.xaxis)-1, max(observation.xaxis)+1)
print("After ", len(telluric))
# Plot the two Spectrum to see what we have
plt.figure()
plt.plot(telluric.xaxis, telluric.flux, label="Telluric")
plt.plot(observation.xaxis, observation.flux, "r--", label="Observation")
plt.xlabel("Wavelength (nm)")
plt.ylabel("Flux/Absorption")
#plt.xlim([np.min(observation.xaxis), np.max(observation.xaxis)]) # Veiw this detector only
plt.legend(loc=0)
plt.show()
B = observation.header["HIERARCH ESO TEL AIRM END"] / float(telluric.header["airmass"]) # Should probably average airmass of observation (Not just End as used here)
print("Airmass Ratio = {}".format(B))
scaled_telluric = telluric ** B
# Warning! When I attempted a division before applying the telluric.wav_select() above I obtained a MemoryError in the interpolation
# This needs to be looked into (it takes a while to run atm)
Corrected = observation / scaled_telluric
# Plot Result
plt.plot(observation.xaxis, observation.flux, label="Observation")
plt.plot(Corrected.xaxis, Corrected.flux, "r--", label="Corrected")
plt.xlabel("Wavelength (nm)")
plt.ylabel("Flux/Absorption")
plt.xlim([np.min(observation.xaxis), np.max(observation.xaxis)]) # Veiw this detector only
plt.legend(loc=0)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1 Create a Google Form with these questions
Step2: Step 2
Step3: Step 3
|
<ASSISTANT_TASK:>
Python Code:
!pip install nxpd
%matplotlib inline
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import numpy as np
from operator import truediv
from collections import Counter
import itertools
import random
import collaboratr
#from nxpd import draw
#import nxpd
#reload(collaboratr)
def format_name(data):
first_name = ['-'.join(list(map(str.capitalize,d))) for d in data['Name'].str.replace(" ", "-").str.split('-')]
last_name = ['-'.join(list(map(str.capitalize,d))) for d in data['Last'].str.replace(" ", "-").str.split('-')]
full_name = pd.Series([m+" "+n for m,n in zip(first_name,last_name)])
return full_name
# Retrieve data from Google Sheet and parse using pandas dataframe
student_data = pd.read_csv("students.csv")
student_data = student_data.replace(np.nan,' ', regex=True)
# Store student information in variables.
#
# Collaboratr divided people into "learners" and "teachers" based on what they wanted to "learn" and "teach."
# Here, students are always "learners" by default and the scientists are always "teachers."
# To maintain the structure of the pandas dataframe,
# I've created blank values for what students want to "teach" and what scientists want to "learn."
### write a function that would format names (including hyphens)
student_data['Full Name'] = format_name(student_data)
student_names = student_data['Full Name']
nStudents = len(student_names)
student_learn = student_data['If I could be any type of scientist when I grow up, I would want to study:']
student_teach = pd.Series(["" for i in range (nStudents)], index=[i for i in range(nStudents)])
student_email = pd.Series(["" for i in range (nStudents)], index=[i for i in range(nStudents)])
# Store scientist information in variables.
scientist_data = pd.read_csv("scientists_1.csv")
scientist_data = scientist_data.replace(np.nan,' ', regex=True)
#drop any duplicate email entries in the data frame
drop = np.where(scientist_data.duplicated('Email')==True)[0]
temp = scientist_data.drop(scientist_data.index[drop])
scientist_data = temp
scientist_data['Full Name'] = format_name(scientist_data)
scientist_names = scientist_data['Full Name']
nScientists = len(scientist_names)
scientist_learn = pd.Series(["" for i in range (nScientists)], index=[i for i in range(nScientists)])
scientist_teach = scientist_data['We will match you with a pen pal who has expressed an interest in at least one of the following subjects. Which topic is most relevant to your work?']
scientist_email = scientist_data['Email']
#drop any duplicate email entries in the data frame
drop = np.where(scientist_data.duplicated('Full Name')==True)[0]
temp = scientist_data.drop(scientist_data.index[drop])
scientist_data = temp
names = student_names.append(scientist_names, ignore_index=True)
learn = student_learn.append(scientist_learn, ignore_index=True)
teach = student_teach.append(scientist_teach, ignore_index=True)
emails = student_email.append(scientist_email, ignore_index=True)
G = nx.DiGraph()
# Insert users in graphs
for n,e,l,t in zip(names, emails, learn, teach):
collaboratr.insert_node(G,n, email=e, learn=l.split(';'), teach=t.split(';'))
def sort_things(stu_data, sci_data):
num_interests = {}
for i,r in stu_data.iterrows():
name = r['Name'].capitalize() + " " + r['Last'].capitalize()
num_interests[name] = 1  # record every student; rebinding the dict here kept only the last one
print(num_interests)
stu_names_sorted = sorted(num_interests, key=num_interests.get)
print(stu_names_sorted)
interests_stu = Counter(list(itertools.chain.from_iterable(\
[ i.split(';') for i in stu_data['If I could be any type of scientist when I grow up, I would want to study:'] ])))
interests_sci = Counter(list(itertools.chain.from_iterable(\
[ i.split(';') for i in sci_data['We will match you with a pen pal who has expressed an interest in at least one of the following subjects. Which topic is most relevant to your work?'] ])))
interests_rel = { key: interests_stu[key]/interests_sci[key] for key in interests_sci.keys() }
interests_rel_sorted = sorted(interests_rel, key=interests_rel.get)
return interests_rel_sorted, stu_names_sorted
def assigner(assign, stu_data, sci_data, max_students=2):
assign_one = {}
subscriptions = { n: 0 for n in sci_data['What is your name?'] }
interests_rel_sorted, stu_names_sorted = sort_things(stu_data, sci_data)
for key in interests_rel_sorted:
for name in stu_names_sorted:
if name not in assign_one:
if key in assign[name].keys():
try:
scientist = np.random.choice(assign[name][key])
except ValueError:
scientist = np.random.choice(scientist_data['What is your name?'])
assign_one[name] = scientist
subscriptions[scientist] += 1
if subscriptions[scientist]>=max_students:
for kk,vv in assign.items():
if vv:
for k,v in vv.items():
if scientist in v:
v.remove(scientist)
for name in stu_names_sorted:
if name not in assign_one:
scientist = np.random.choice([ k for k,v in subscriptions.items() if v < max_students ])
assign_one[name] = scientist
return assign_one
assign_one = None
max_students = 2
while assign_one is None:
try:
participants = G.nodes(data=True)
assign = collaboratr.assign_users(G,participants)
assign_one = assigner(assign, student_data, scientist_data, max_students=max_students)
if max(Counter([v for k,v in assign_one.items()]).values())>max_students:
assign_one = None
except ValueError:
# print("error")
pass
print(assign_one)
print(Counter([v for k,v in assign_one.items()]))
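# Sanity check (assumption: a valid assignment means no scientist is matched to
# more than max_students students):
assert max(Counter(assign_one.values()).values()) <= max_students, "a scientist is over-subscribed"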
items = []
for k,v in assign_one.items():
items.append(str(v.ljust(22) + "-> " + k.ljust(22) + "who is interested in " \
+ student_data.loc[student_data['What is your name?'] == k]\
['What general science fields are you interested in?'].tolist()[0] ))
for i in sorted(items):
print(i)
a, b = sort_things(student_data, scientist_data)
print(a, b)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make a 10k row version of the file for development.
Step2: I am only using 3.7% of Waffle's memory at the beginning
Step3: Use summary_counts, not summary_rpkm for gene names.
|
<ASSISTANT_TASK:>
Python Code:
! ls -lh ../waffle_network_dir/*.tsv
! wc -l ../waffle_network_dir/network.py.tsv
! head -n 5 ../waffle_network_dir/network.py.tsv | csvlook -t
! ls -lh ../waffle_network_dir/network.py.tsv
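# Imports assumed by the cells below (not shown in the original excerpt):
import os
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt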
network = pd.read_csv('../waffle_network_dir/network.py.tsv', skiprows=1,
#skipfooter = 49995001 - 1*10**4,
#skipfooter = 1000, # can't have skipfooter with dtype. :(
sep='\t', names = ['source', 'target', 'pcor'],
dtype = {'source':str, 'target':str, 'pcor':float})
network.shape
network.head()
def label_associations(row):
if row['pcor'] > 0:
val = 'positive'
elif row['pcor'] < 0:
val = 'negative'
elif row['pcor'] == 0:
val = 'drop me'
return val
network['association'] = network.apply(label_associations, axis=1)
network['association'].unique()
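# A vectorized alternative to the row-wise apply above (same three-way split on the
# sign of pcor; left commented since the column is already populated):
# network['association'] = np.sign(network['pcor']).map({1.0: 'positive', -1.0: 'negative', 0.0: 'drop me'})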
print("shape before dropping rows with pcor == 0: {}".format(network.shape))
network = network[network['association'] != 'drop me']
print("shape after dropping rows with pcor == 0: {}".format(network.shape))
network.head(3)
! top -o %MEM | head
network['target_organism'] = network['target'].str.extract('([A-z0-9]+)_[0-9]+')
network['target_gene'] = network['target'].str.extract('[A-z0-9]+_([0-9]+)')
network['source_organism'] = network['source'].str.extract('([A-z0-9]+)_[0-9]+')
network['source_gene'] = network['source'].str.extract('[A-z0-9]+_([0-9]+)')
network.head()
network = network.rename(columns=lambda x: re.sub('source$', 'source_locus_tag', x))
network = network.rename(columns=lambda x: re.sub('target$', 'target_locus_tag', x))
network.head(2)
network['target_organism'].unique()
len(network['target_organism'].unique())
network['cross_species'] = network['source_organism'] != network['target_organism']
network.cross_species.describe()
network.cross_species.plot.hist()
network.pcor.plot.hist()
fig, ax = plt.subplots(1, 1, figsize=(4, 3))
plt.hist(network.pcor)
plt.yscale('log', nonposy='clip')
plt.xlabel('partial correlation value')
plt.ylabel('# edges')
plt.tight_layout()
plt.savefig('161209_hist_of_pcor_values.pdf')
plt.savefig('161209_hist_of_pcor_values.png', dpi=600)
fig, ax = plt.subplots(1, 1, figsize=(5, 2.5))
plt.hist(network.pcor, 50)
plt.yscale('log', nonposy='clip')
plt.xlabel('partial correlation value')
plt.ylabel('# edges')
plt.tight_layout()
plt.savefig('161209_hist_of_pcor_values_50_bins.pdf')
plt.savefig('161209_hist_of_pcor_values_50_bins.png', dpi=600)
locus_to_organism = pd.read_csv('/dacb/meta4_bins/data/genome_bins.locus_to_organism.tsv', sep='\t',
names=['locus', 'organism'])
locus_to_organism.head()
# Found a problem:
# Expected exactly 2 organsm names, but we have 3
# {'Methylobacter-123 (UID203) ', 'Methylobacter-123 (UID203)', 'Methylotenera mobilis-49 (UID203)'}
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.str.strip.html
# strips both left and right whitespace :)
locus_to_organism['organism'] = locus_to_organism['organism'].str.strip()
locus_to_organism['organism ID'] = locus_to_organism['locus'].str.extract('([A-z]+[0-9]+)_[0-9]+')
source_organism_names = locus_to_organism[['organism ID', 'organism']].drop_duplicates()
target_organism_names = locus_to_organism[['organism ID', 'organism']].drop_duplicates()
source_organism_names = source_organism_names.rename(
columns={'organism ID':'source_organism', 'organism':'source_organism_name'})
target_organism_names = target_organism_names.rename(
columns={'organism ID':'target_organism', 'organism':'target_organism_name'})
source_organism_names
merged = pd.merge(network, source_organism_names)
len(merged.source_organism_name.unique())
merged.head(2)
merged = pd.merge(merged, target_organism_names)
print(merged.shape)
print(network.shape)
merged.head()
len(merged.target_organism_name.unique())
print(merged.shape)
print(network.shape)
merged.tail(3)
genes = pd.read_csv('/dacb/meta4_bins/analysis/assemble_summaries/summary_counts.xls',
sep = '\t', usecols=[1, 2])
genes.tail(3)
genes.tail()
genes[genes['locus_tag'] == 'Ga0081607_11219']
merged.head(2)
source_genes = genes[['locus_tag', 'product']].rename(
columns={'locus_tag':'source_locus_tag', 'product':'source_gene_product'})
target_genes = genes[['locus_tag', 'product']].rename(
columns={'locus_tag':'target_locus_tag', 'product':'target_gene_product'})
source_genes.head(2)
network.shape
merged.shape
merged = pd.merge(merged, source_genes)
merged.shape
merged = pd.merge(merged, target_genes)
merged.shape
merged.head(2)
merged.head(3)
merged['sort'] = merged.pcor.abs()
merged = merged.sort(columns='sort', ascending=False).drop('sort', axis=1)
merged['pcor'].describe()
merged.head(2)
filename = '50M_network'
! ls ../data
dirname = '../data/50M_network/'
if not os.path.exists(dirname):
print"make dir {}".format(dirname)
os.mkdir(dirname)
else:
print("dir {} already exists.".format(dirname))
path = dirname + filename + '.tsv'
print('save to : {}'.format(path))
merged.to_csv(path, sep='\t', index=False)
# The CSV isn't a good idea because of the gene names.
#merged.to_csv(dirname + filename + '.csv')
merged.head(100).to_csv(dirname + filename + '--100' + '.tsv', sep='\t', index=False)
os.listdir(dirname)
merged.shape
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Polynomial regression, revisited
Step2: Let's use matplotlib to visualize what a polynomial regression looks like on the house data.
Step3: As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. For houses with identical square footage, we break the tie by their prices.
Step4: Let us revisit the 15th-order polynomial model using the 'sqft_living' input. Generate polynomial features up to degree 15 using polynomial_sframe() and fit a model with these features. When fitting the model, use an L2 penalty of 1e-5
Step5: Note
Step6: QUIZ QUESTION
Step7: Next, fit a 15th degree polynomial on set_1, set_2, set_3, and set_4, using 'sqft_living' to predict prices. Print the weights and make a plot of the resulting model.
Step8: The four curves should differ from one another a lot, as should the coefficients you learned.
Step9: These curves should vary a lot less, now that you applied a high degree of regularization.
Step10: Once the data is shuffled, we divide it into equal segments. Each segment should receive n/k elements, where n is the number of observations in the training set and k is the number of segments. Since the segment 0 starts at index 0 and contains n/k elements, it ends at index (n/k)-1. The segment 1 starts where the segment 0 left off, at index (n/k). With n/k elements, the segment 1 ends at index (n*2/k)-1. Continuing in this fashion, we deduce that the segment i starts at index (n*i/k) and ends at (n*(i+1)/k)-1.
Step11: Let us familiarize ourselves with array slicing with SFrame. To extract a continuous slice from an SFrame, use colon in square brackets. For instance, the following cell extracts rows 0 to 9 of train_valid_shuffled. Notice that the first index (0) is included in the slice but the last index (10) is omitted.
Step12: Now let us extract individual segments with array slicing. Consider the scenario where we group the houses in the train_valid_shuffled dataframe into k=10 segments of roughly equal size, with starting and ending indices computed as above.
Step13: To verify that we have the right elements extracted, run the following cell, which computes the average price of the fourth segment. When rounded to nearest whole number, the average should be $536,234.
Step14: After designating one of the k segments as the validation set, we train a model using the rest of the data. To choose the remainder, we slice (0
Step15: Extract the remainder of the data after excluding fourth segment (segment 3) and assign the subset to train4.
Step16: Now we are ready to implement k-fold cross-validation. Write a function that computes k validation errors by designating each of the k segments as the validation set. It accepts as parameters (i) k, (ii) l2_penalty, (iii) dataframe, (iv) name of output column (e.g. price) and (v) list of feature names. The function returns the average validation error using k segments as validation sets.
Step17: Once we have a function to compute the average validation error for a model, we can write a loop to find the model that minimizes the average validation error. Write a loop that does the following
Step18: QUIZ QUESTIONS
Step19: Once you found the best value for the L2 penalty using cross-validation, it is important to retrain a final model on all of the training data using this value of l2_penalty. This way, your final model will be trained on the entire dataset.
Step20: QUIZ QUESTION
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
def polynomial_sframe(feature, degree):
# assume that degree >= 1
# initialize the SFrame:
poly_sframe = graphlab.SFrame()
# and set poly_sframe['power_1'] equal to the passed feature
poly_sframe['power_1'] = feature
# first check if degree > 1
if degree > 1:
# then loop over the remaining degrees:
# range usually starts at 0 and stops at the endpoint-1. We want it to start at 2 and stop at degree
for power in range(2, degree+1):
# first we'll give the column a name:
name = 'power_' + str(power)
# then assign poly_sframe[name] to the appropriate power of feature
poly_sframe[name] = feature.apply(lambda x: x ** power)
return poly_sframe
import matplotlib.pyplot as plt
%matplotlib inline
sales = graphlab.SFrame('kc_house_data.gl/')
sales = sales.sort(['sqft_living','price'])
l2_small_penalty = 1e-5
poly15_data = polynomial_sframe(sales['sqft_living'], 15)
my_features = poly15_data.column_names() # get the name of the features
poly15_data['price'] = sales['price'] # add price to the data since it's the target
model15 = graphlab.linear_regression.create(poly15_data, target = 'price', features = my_features, l2_penalty=l2_small_penalty, validation_set = None)
model15.get('coefficients')
(semi_split1, semi_split2) = sales.random_split(.5,seed=0)
(set_1, set_2) = semi_split1.random_split(0.5, seed=0)
(set_3, set_4) = semi_split2.random_split(0.5, seed=0)
subdata_1 = polynomial_sframe(set_1['sqft_living'], 15)
features_1 = subdata_1.column_names() # get the name of the features
subdata_1['price'] = set_1['price'] # add price to the data since it's the target
model_1 = graphlab.linear_regression.create(subdata_1, target = 'price', features = features_1, l2_penalty=l2_small_penalty, validation_set = None, verbose=False)
model_1.get('coefficients').print_rows(num_rows=16)
plt.plot(subdata_1['power_1'],subdata_1['price'],'.',
subdata_1['power_1'], model_1.predict(subdata_1),'-')
subdata_2 = polynomial_sframe(set_2['sqft_living'], 15)
features_2 = subdata_2.column_names() # get the name of the features
subdata_2['price'] = set_2['price'] # add price to the data since it's the target
model_2 = graphlab.linear_regression.create(subdata_2, target = 'price', features = features_2, l2_penalty=l2_small_penalty, validation_set = None, verbose=False)
model_2.get('coefficients').print_rows(num_rows=16)
plt.plot(subdata_2['power_1'],subdata_2['price'],'.',
subdata_2['power_1'], model_2.predict(subdata_2),'-')
subdata_3 = polynomial_sframe(set_3['sqft_living'], 15)
features_3 = subdata_3.column_names() # get the name of the features
subdata_3['price'] = set_3['price'] # add price to the data since it's the target
model_3 = graphlab.linear_regression.create(subdata_3, target = 'price', features = features_3, l2_penalty=l2_small_penalty, validation_set = None, verbose=False)
model_3.get('coefficients').print_rows(num_rows=16)
plt.plot(subdata_3['power_1'],subdata_3['price'],'.',
subdata_3['power_1'], model_3.predict(subdata_3),'-')
subdata_4 = polynomial_sframe(set_4['sqft_living'], 15)
features_4 = subdata_4.column_names() # get the name of the features
subdata_4['price'] = set_4['price'] # add price to the data since it's the target
model_4 = graphlab.linear_regression.create(subdata_4, target = 'price', features = features_4, l2_penalty=l2_small_penalty, validation_set = None, verbose=False)
model_4.get('coefficients').print_rows(num_rows=16)
plt.plot(subdata_4['power_1'],subdata_4['price'],'.',
subdata_4['power_1'], model_4.predict(subdata_4),'-')
subdata_1 = polynomial_sframe(set_1['sqft_living'], 15)
features_1 = subdata_1.column_names() # get the name of the features
subdata_1['price'] = set_1['price'] # add price to the data since it's the target
model_1 = graphlab.linear_regression.create(subdata_1, target = 'price', features = features_1, l2_penalty=1e5, validation_set = None, verbose=False)
model_1.get('coefficients').print_rows(num_rows=16)
plt.plot(subdata_1['power_1'],subdata_1['price'],'.',
subdata_1['power_1'], model_1.predict(subdata_1),'-')
subdata_2 = polynomial_sframe(set_2['sqft_living'], 15)
features_2 = subdata_2.column_names() # get the name of the features
subdata_2['price'] = set_2['price'] # add price to the data since it's the target
model_2 = graphlab.linear_regression.create(subdata_2, target = 'price', features = features_2, l2_penalty=1e5, validation_set = None, verbose=False)
model_2.get('coefficients').print_rows(num_rows=16)
plt.plot(subdata_2['power_1'],subdata_2['price'],'.',
subdata_2['power_1'], model_2.predict(subdata_2),'-')
subdata_3 = polynomial_sframe(set_3['sqft_living'], 15)
features_3 = subdata_3.column_names() # get the name of the features
subdata_3['price'] = set_3['price'] # add price to the data since it's the target
model_3 = graphlab.linear_regression.create(subdata_3, target = 'price', features = features_3, l2_penalty=1e5, validation_set = None, verbose=False)
model_3.get('coefficients').print_rows(num_rows=16)
plt.plot(subdata_3['power_1'],subdata_3['price'],'.',
subdata_3['power_1'], model_3.predict(subdata_3),'-')
subdata_4 = polynomial_sframe(set_4['sqft_living'], 15)
features_4 = subdata_4.column_names() # get the name of the features
subdata_4['price'] = set_4['price'] # add price to the data since it's the target
model_4 = graphlab.linear_regression.create(subdata_4, target = 'price', features = features_4, l2_penalty=1e5, validation_set = None, verbose=False)
model_4.get('coefficients').print_rows(num_rows=16)
plt.plot(subdata_4['power_1'],subdata_4['price'],'.',
subdata_4['power_1'], model_4.predict(subdata_4),'-')
(train_valid, test) = sales.random_split(.9, seed=1)
train_valid_shuffled = graphlab.toolkits.cross_validation.shuffle(train_valid, random_seed=1)
n = len(train_valid_shuffled)
k = 10 # 10-fold cross-validation
for i in xrange(k):
start = (n*i)/k
end = (n*(i+1))/k-1
print i, (start, end)
train_valid_shuffled[0:10] # rows 0 to 9
k = 10
def get_st_ed(i):
start = (n*i)/k
end = (n*(i+1))/k-1
return start, end
st, ed = get_st_ed(3)
validation4 = train_valid_shuffled[st:ed+1]
print int(round(validation4['price'].mean(), 0))
n = len(train_valid_shuffled)
first_two = train_valid_shuffled[0:2]
last_two = train_valid_shuffled[n-2:n]
print first_two.append(last_two)
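# Filling in the step described above (a sketch: st and ed still hold the bounds of
# segment 3 from get_st_ed(3), and n is the total number of rows):
train4 = train_valid_shuffled[0:st].append(train_valid_shuffled[ed+1:n])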
print int(round(train4['price'].mean(), 0))
def get_st_ed(k, i):
start = (n*i)/k
end = (n*(i+1))/k-1
return start, end
def k_fold_cross_validation(k, l2_penalty, data, output_name, features_list):
total_RSS = 0
for i in range(k):
st, ed = get_st_ed(k, i)
validation_set = data[st:ed+1]
training_set = data[:st].append(data[ed+1:])
model = graphlab.linear_regression.create(training_set, target = output_name, features = features_list,
l2_penalty=l2_penalty, validation_set = None, verbose=False)
predictions = model.predict(validation_set)
residuals = predictions - validation_set['price']
RSS = (residuals * residuals).sum()
total_RSS += RSS
return total_RSS / k
import numpy as np
penalty_list = np.logspace(1, 7, num=13)
data = polynomial_sframe(train_valid_shuffled['sqft_living'], 15)
features_list = data.column_names()
data['price'] = train_valid_shuffled['price']
sort_table = []
for penalty in penalty_list:
avg_RSS = k_fold_cross_validation(k=10, l2_penalty=penalty, data=data, output_name='price', features_list=features_list)
print 'penalty ', penalty
print 'avg_RSS', avg_RSS
sort_table.append((avg_RSS, penalty))
print sorted(sort_table)[0]
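# Pull the winning penalty out programmatically (same sorted list as above) so it
# does not have to be hard-coded by index later:
best_l2_penalty = sorted(sort_table)[0][1]
print 'best l2_penalty', best_l2_penalty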
# Plot the l2_penalty values in the x axis and the cross-validation error in the y axis.
# Using plt.xscale('log') will make your plot more intuitive.
errors = [x[0] for x in sort_table]
plt.xscale('log')
plt.plot(penalty_list, errors, '-')
final_data = polynomial_sframe(train_valid['sqft_living'], 15)
final_features_list = final_data.column_names()
final_data['price'] = train_valid['price']
final_model = graphlab.linear_regression.create(final_data, target = 'price', features = final_features_list,
l2_penalty=penalty_list[3], validation_set = None, verbose=False)
tdata = polynomial_sframe(test['sqft_living'], 15)
tdata['price'] = test['price']
predictions = final_model.predict(tdata)
residuals = predictions - tdata['price']
RSS = (residuals * residuals).sum()
print RSS
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import the Openfisca-specific modules
Step2: Import a new color palette
Step3: Import the csv files giving the aggregate amounts of the expenditures recorded in the BdF surveys. These amounts are computed in compute_depenses_carburants
Step4: Import the csv files giving the aggregate amounts of the same items according to the national accounts
Step5: Create the charts comparing the consumption obtained from the Budget des Familles survey against the national accounts
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import pkg_resources
import os
import pandas as pd
from pandas import concat
import seaborn
from openfisca_france_indirect_taxation.examples.utils_example import graph_builder_line
seaborn.set_palette(seaborn.color_palette("Set2", 12))
%matplotlib inline
assets_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location
)
products = ['transports', 'carburants', 'essence', 'diesel']
depenses_bdf = pd.DataFrame()
for element in products:
depenses = pd.DataFrame.from_csv(os.path.join(assets_directory,
'openfisca_france_indirect_taxation', 'assets', 'depenses',
'depenses_{}_totales_bdf.csv').format(element), sep = ',', header = -1)
depenses.rename(columns = {1: '{} bdf'.format(element)}, inplace = True)
depenses.index = depenses.index.str.replace('en ', '')
depenses = depenses.sort_index()
depenses_bdf = concat([depenses, depenses_bdf], axis = 1)
depenses_bdf.index = depenses_bdf.index.astype(int)
parametres_fiscalite_file_path = os.path.join(
assets_directory,
'openfisca_france_indirect_taxation',
'assets',
'legislation',
'Parametres fiscalite indirecte.xls'
)
masses_cn_data_frame = pd.read_excel(parametres_fiscalite_file_path, sheetname = "consommation_CN")
masses_cn_carburants = masses_cn_data_frame[masses_cn_data_frame['Fonction'] == 'Carburants et lubrifiants']
masses_cn_carburants = masses_cn_carburants.transpose()
masses_cn_carburants.rename(columns = {76: 'carburants agregat'}, inplace = True)
masses_cn_transports = masses_cn_data_frame[masses_cn_data_frame['Fonction'] == 'Transports']
masses_cn_transports = masses_cn_transports.transpose()
masses_cn_transports.rename(columns = {69: 'transports agregat'}, inplace = True)
comparaison_bdf_agregats = concat([depenses_bdf, masses_cn_carburants, masses_cn_transports], axis = 1)
comparaison_bdf_agregats = comparaison_bdf_agregats.dropna()
print 'Comparison of aggregate fuel expenditures, Budget des Familles versus national accounts'
graph_builder_line(comparaison_bdf_agregats[['carburants agregat'] + ['carburants bdf']])
print 'Comparison of aggregate transport expenditures, Budget des Familles versus national accounts'
graph_builder_line(comparaison_bdf_agregats[['transports agregat'] + ['transports bdf']])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Modules
Step2: What can be imported?
Step3: from module import (
Step4: Notes
Step5: Scopes
Step6: What will running modb.py print, and what will importing X from moda give?
Step7: 2
Step8: What will running mod1.py print?
Step9: __init__.py
Step10: Relative imports
Step11: From here
Step12: References
Step13: What happens when we run sec.py?
Step14: Exercise
|
<ASSISTANT_TASK:>
Python Code:
range(2), xrange(2)
def gen(r):
for i in xrange(r):
yield i ** 2
generator = gen(5)
generator
list(generator)
generator = (i ** 2 for i in xrange(5))
generator
list(generator)
def gen_squares(up_to=100000):
s = 0
for sq in (i**2 for i in xrange(up_to)):
s += sq
return s
def squares(up_to=100000):
s = 0
for sq in [i**2 for i in xrange(up_to)]:
s += sq
return s
%memit gen_squares()
%memit squares()
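# (Aside: %memit comes from the memory_profiler IPython extension; if the two cells
# above fail, load it first with `%load_ext memory_profiler`.)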
import sys
sys.path[-2:]
# %load mod.py
def printer(x):
print x
ATTR = 1
MUTABLE = [1, 2, 3]
import mod
mod.printer("bla bla")
from mod import printer
printer("bla bla")
import mod
mod.ATTR
mod.ATTR = 231
mod.ATTR
import mod
mod.ATTR
%%javascript
IPython.notebook.kernel.restart();
# %load mod.py
def printer(x):
print x
ATTR = 1
MUTABLE = [1, 2, 3]
from mod import ATTR, MUTABLE
ATTR = 1
MUTABLE[2] = 12
import mod
mod.ATTR, mod.MUTABLE
# %load a.py
def fun():
print "jestem w a"
# %load b.py
def fun():
print "jestem w b"
from a import fun
from b import fun
fun()
import a
import b
a.fun()
b.fun()
# %load moda.py
X = 88
def f():
global X
X = 99
# %load modb.py
X = 11
import moda
moda.f()
print(X, moda.X)
%run modb.py
%%javascript
IPython.notebook.kernel.restart();
from moda import X
X
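# (Why this still shows 88: after the kernel restart nothing has called moda.f(),
# and `from moda import X` copies the current value of moda.X into a fresh name.)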
# %load mod1.py
x = 1
import mod2
print(x, mod2.x, mod2.mod3.x)
# %load mod2.py
x = 2
import mod3
print(x, mod3.x)
# %load mod3.py
x = 3
%run mod1.py
!tree module/
# %load module/__init__.py
print "module"
x = 1
# %load module/dir1/__init__.py
print "dir1"
y = 2
# %load module/dir1/mod.py
print "mod.py"
z = 3
import module.dir1.mod
import module.dir1.mod
module.x
module.dir1.y
module.dir1.mod.z
# %load name.py
print __name__
import name
%run name
# %load kolejnosc.py
func1()
def func1():
print(func2())
func1()
def func2():
return("No witam")
func1()
%run kolejnosc.py
# %load kolejnosc.py
# func1()
def func1():
print(func2())
func1()
def func2():
return("No witam")
func1()
%run kolejnosc.py
# %load kolejnosc.py
# func1()
def func1():
print(func2())
# func1()
def func2():
return("No witam")
func1()
%run kolejnosc.py
# %load first.py
x = 1
def printer():
print(x)
# %load sec.py
from first import x, printer
x = 88
printer()
%run sec.py
from rozwiazania import *
prostokat(4, 10)
kwadrat(5)
trojkat(4)
wiatraczek(10)
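# (Glossary for the Polish identifiers above: prostokat = rectangle,
# kwadrat = square, trojkat = triangle, wiatraczek = pinwheel.)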
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Evaluating model solver
Step2: Expansion of orthogonal polynomials
Step3: Fourier coefficients
Step4: Note that if the Fourier coefficients are needed, then they are still
Step5: Error analysis
Step6: The analysis can be performed as follows
|
<ASSISTANT_TASK:>
Python Code:
import chaospy
from problem_formulation import joint
gauss_quads = [
chaospy.generate_quadrature(order, joint, rule="gaussian")
for order in range(1, 8)
]
sparse_quads = [
chaospy.generate_quadrature(
order, joint, rule=["genz_keister_24", "clenshaw_curtis"], sparse=True)
for order in range(1, 5)
]
from matplotlib import pyplot
pyplot.rc("figure", figsize=[12, 4])
nodes, weights = gauss_quads[5]
pyplot.subplot(121)
pyplot.title("Gaussian")
pyplot.scatter(*nodes, s=weights*2e3)
nodes, weights = sparse_quads[3]
idx = weights > 0
pyplot.subplot(122)
pyplot.title("sparse-grid")
pyplot.scatter(*nodes[:, idx], s=weights[idx]*2e3)
pyplot.scatter(*nodes[:, ~idx], s=-weights[~idx]*2e3, color="grey")
pyplot.show()
from problem_formulation import model_solver, coordinates
import numpy
gauss_evals = [
numpy.array([model_solver(node) for node in nodes.T])
for nodes, weights in gauss_quads
]
sparse_evals = [
numpy.array([model_solver(node) for node in nodes.T])
for nodes, weights in sparse_quads
]
pyplot.subplot(121)
pyplot.plot(coordinates, gauss_evals[6].T, alpha=0.3)
pyplot.title("Gaussian")
pyplot.subplot(122)
pyplot.plot(coordinates, sparse_evals[3].T, alpha=0.3)
pyplot.title("sparse-grid")
pyplot.show()
expansions = [chaospy.generate_expansion(order, joint)
for order in range(1, 10)]
expansions[0].round(10)
gauss_model_approx = [
chaospy.fit_quadrature(expansion, nodes, weights, evals)
for expansion, (nodes, weights), evals in zip(expansions, gauss_quads, gauss_evals)
]
sparse_model_approx = [
chaospy.fit_quadrature(expansion, nodes, weights, evals)
for expansion, (nodes, weights), evals in zip(expansions, sparse_quads, sparse_evals)
]
model_approx = gauss_model_approx[4]
nodes, _ = gauss_quads[4]
evals = model_approx(*nodes)
pyplot.subplot(121)
pyplot.plot(coordinates, evals, alpha=0.3)
pyplot.title("Gaussian")
model_approx = sparse_model_approx[1]
nodes, _ = sparse_quads[1]
evals = model_approx(*nodes)
pyplot.subplot(122)
pyplot.plot(coordinates, evals, alpha=0.3)
pyplot.title("sparse-grid")
pyplot.show()
expected = chaospy.E(gauss_model_approx[-2], joint)
std = chaospy.Std(gauss_model_approx[-2], joint)
expected[:4].round(4), std[:4].round(4)
pyplot.rc("figure", figsize=[6, 4])
pyplot.xlabel("coordinates")
pyplot.ylabel("model approximation")
pyplot.fill_between(
coordinates, expected-2*std, expected+2*std, alpha=0.3)
pyplot.plot(coordinates, expected)
pyplot.show()
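# A possible extension (hedged: assumes this chaospy version exposes first-order
# Sobol sensitivity indices as chaospy.Sens_m; left commented for that reason):
# first_order_sobol = chaospy.Sens_m(gauss_model_approx[-2], joint)
# first_order_sobol.round(4)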
from problem_formulation import error_in_mean, error_in_variance
error_in_mean(expected), error_in_variance(std**2)
gauss_sizes = [len(weights) for _, weights in gauss_quads]
eps_gauss_mean = [
error_in_mean(chaospy.E(model, joint))
for model in gauss_model_approx
]
eps_gauss_var = [
error_in_variance(chaospy.Var(model, joint))
for model in gauss_model_approx
]
sparse_sizes = [len(weights) for _, weights in sparse_quads]
eps_sparse_mean = [
error_in_mean(chaospy.E(model, joint))
for model in sparse_model_approx
]
eps_sparse_var = [
error_in_variance(chaospy.Var(model, joint))
for model in sparse_model_approx
]
pyplot.rc("figure", figsize=[12, 4])
pyplot.subplot(121)
pyplot.title("Error in mean")
pyplot.loglog(gauss_sizes, eps_gauss_mean, "o-", label="Gaussian")
pyplot.loglog(sparse_sizes, eps_sparse_mean, "o--", label="sparse")
pyplot.legend()
pyplot.subplot(122)
pyplot.title("Error in variance")
pyplot.loglog(gauss_sizes, eps_gauss_var, "o-", label="Gaussian")
pyplot.loglog(sparse_sizes, eps_sparse_var, "o--", label="sparse")
pyplot.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the Google cloud-storage library as well.
Step2: Restart the Kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your GCP account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Vertex AI constants
Step12: Clients
Step13: Prepare a trainer script
Step14: Task.py contents
Step15: Store training script on your Cloud Storage bucket
Step16: Train a model
Step17: Example output
Step18: Response
Step19: Example output
Step20: projects.locations.hyperparameterTuningJob.get
Step21: Response
Step22: Example output
Step23: Review the results of the study
Step24: Example output
|
<ASSISTANT_TASK:>
Python Code:
! pip3 install -U google-cloud-aiplatform --user
! pip3 install google-cloud-storage
import os
if not os.getenv("AUTORUN"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your Google Cloud account. This provides access
# to your Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Vertex, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this tutorial in a notebook locally, replace the string
# below with the path to your service account key and run this cell to
# authenticate your Google Cloud account.
else:
%env GOOGLE_APPLICATION_CREDENTIALS your_path_to_credentials.json
# Log in to your account on Google Cloud
! gcloud auth login
BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "[your-bucket-name]":
BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION gs://$BUCKET_NAME
! gsutil ls -al gs://$BUCKET_NAME
import os
import sys
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
# API Endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex AI location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_endpoint_client():
client = aip.EndpointServiceClient(client_options=client_options)
return client
def create_prediction_client():
client = aip.PredictionServiceClient(client_options=client_options)
return client
def create_job_client():
client = aip.JobServiceClient(client_options=client_options)
return client
clients = {}
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
clients["job"] = create_job_client()
for client in clients.items():
print(client)
# Make folder for python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\
tag_build =\n\
tag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\
# Requires TensorFlow Datasets\n\
setuptools.setup(\n\
install_requires=[\n\
'tensorflow_datasets==1.3.0',\n\
],\n\
packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\
Name: Hyperparameter Tuning - Boston Housing\n\
Version: 0.0.0\n\
Summary: Demonstration hyperparameter tuning script\n\
Home-page: www.google.com\n\
Author: Google\n\
Author-email: aferlitsch@gmail.com\n\
License: Public\n\
Description: Demo\n\
Platform: Vertex AI"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
%%writefile custom/trainer/task.py
# hyperparameter tuning for Boston Housing
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
from hypertune import HyperTune
import numpy as np
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default='/tmp/saved_model', type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.001, type=float,
help='Learning rate.')
parser.add_argument('--units', dest='units',
default=64, type=int,
help='Number of units.')
parser.add_argument('--epochs', dest='epochs',
default=20, type=int,
help='Number of epochs.')
parser.add_argument('--param-file', dest='param_file',
default='/tmp/param.txt', type=str,
help='Output file for parameters')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
def make_dataset():
# Scaling Boston Housing data features
def scale(feature):
max = np.max(feature)
        feature = (feature / max).astype(float)  # np.float is deprecated; use the builtin float
return feature, max
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data(
path="boston_housing.npz", test_split=0.2, seed=113
)
params = []
    for j in range(13):  # scale each of the 13 feature columns (not the first 13 rows)
        x_train[:, j], colmax = scale(x_train[:, j])
        x_test[:, j] = x_test[:, j] / colmax  # apply the training maximum to the test split too
        params.append(colmax)
# store the normalization (max) value for each feature
with tf.io.gfile.GFile(args.param_file, 'w') as f:
f.write(str(params))
return (x_train, y_train), (x_test, y_test)
# Build the Keras model
def build_and_compile_dnn_model():
model = tf.keras.Sequential([
tf.keras.layers.Dense(args.units, activation='relu', input_shape=(13,)),
tf.keras.layers.Dense(args.units, activation='relu'),
tf.keras.layers.Dense(1, activation='linear')
])
model.compile(
loss='mse',
optimizer=tf.keras.optimizers.RMSprop(learning_rate=args.lr))
return model
model = build_and_compile_dnn_model()
# Instantiate the HyperTune reporting object
hpt = HyperTune()
# Reporting callback
class HPTCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
global hpt
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='val_loss',
metric_value=logs['val_loss'],
global_step=epoch)
# Train the model
BATCH_SIZE = 16
(x_train, y_train), (x_test, y_test) = make_dataset()
model.fit(x_train, y_train, epochs=args.epochs, batch_size=BATCH_SIZE, validation_split=0.1, callbacks=[HPTCallback()])
model.save(args.model_dir)
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz gs://$BUCKET_NAME/hpt_boston_housing.tar.gz
JOB_NAME = "hyperparameter_tuning_" + TIMESTAMP
WORKER_POOL_SPEC = [
{
"replica_count": 1,
"machine_spec": {"machine_type": "n1-standard-4", "accelerator_count": 0},
"python_package_spec": {
"executor_image_uri": "gcr.io/cloud-aiplatform/training/tf-cpu.2-1:latest",
"package_uris": ["gs://" + BUCKET_NAME + "/hpt_boston_housing.tar.gz"],
"python_module": "trainer.task",
"args": ["--model-dir=" + "gs://{}/{}".format(BUCKET_NAME, JOB_NAME)],
},
}
]
STUDY_SPEC = {
"metrics": [
{"metric_id": "val_loss", "goal": aip.StudySpec.MetricSpec.GoalType.MINIMIZE}
],
"parameters": [
{
"parameter_id": "lr",
"discrete_value_spec": {"values": [0.001, 0.01, 0.1]},
"scale_type": aip.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
},
{
"parameter_id": "units",
"integer_value_spec": {"min_value": 32, "max_value": 256},
"scale_type": aip.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
},
],
"algorithm": aip.StudySpec.Algorithm.RANDOM_SEARCH,
}
hyperparameter_tuning_job = aip.HyperparameterTuningJob(
display_name=JOB_NAME,
trial_job_spec={"worker_pool_specs": WORKER_POOL_SPEC},
study_spec=STUDY_SPEC,
max_trial_count=6,
parallel_trial_count=1,
)
print(
MessageToJson(
aip.CreateHyperparameterTuningJobRequest(
parent=PARENT, hyperparameter_tuning_job=hyperparameter_tuning_job
).__dict__["_pb"]
)
)
request = clients["job"].create_hyperparameter_tuning_job(
parent=PARENT, hyperparameter_tuning_job=hyperparameter_tuning_job
)
print(MessageToJson(request.__dict__["_pb"]))
# The full unique ID for the hyperparameter tuning job
hyperparameter_tuning_id = request.name
# The short numeric ID for the hyperparameter tuning job
hyperparameter_tuning_short_id = hyperparameter_tuning_id.split("/")[-1]
print(hyperparameter_tuning_id)
request = clients["job"].get_hyperparameter_tuning_job(name=hyperparameter_tuning_id)
print(MessageToJson(request.__dict__["_pb"]))
while True:
response = clients["job"].get_hyperparameter_tuning_job(
name=hyperparameter_tuning_id
)
if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED:
print("Study trials have not completed:", response.state)
if response.state == aip.PipelineState.PIPELINE_STATE_FAILED:
break
else:
print("Study trials have completed:", response.end_time - response.start_time)
break
time.sleep(20)
best = (None, None, None, float("inf"))  # the study goal is MINIMIZE, so start from +inf
response = clients["job"].get_hyperparameter_tuning_job(name=hyperparameter_tuning_id)
for trial in response.trials:
print(MessageToJson(trial.__dict__["_pb"]))
# Keep track of the best outcome
try:
        if float(trial.final_measurement.metrics[0].value) < best[3]:  # lower val_loss is better
best = (
trial.id,
float(trial.parameters[0].value),
float(trial.parameters[1].value),
float(trial.final_measurement.metrics[0].value),
)
except:
pass
print()
print("ID", best[0])
print("Decay", best[1])
print("Learning Rate", best[2])
print("Validation Accuracy", best[3])
delete_hpt_job = True
delete_bucket = True
# Delete the hyperparameter tuning job using its Vertex AI fully qualified identifier
try:
if delete_hpt_job:
clients["job"].delete_hyperparameter_tuning_job(name=hyperparameter_tuning_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r gs://$BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's start with a string lightning round to warm up. What are the lengths of the strings below?
Step2: 0b.
Step3: 0c.
Step5: 0d.
Step6: 0e.
Step10: 1.
Step12: 2.
Step14: 3.
|
<ASSISTANT_TASK:>
Python Code:
from learntools.core import binder; binder.bind(globals())
from learntools.python.ex6 import *
print('Setup complete.')
a = ""
length = ____
q0.a.check()
b = "it's ok"
length = ____
q0.b.check()
c = 'it\'s ok'
length = ____
q0.c.check()
d = """hey"""
length = ____
q0.d.check()
e = '\n'
length = ____
q0.e.check()
def is_valid_zip(zip_code):
    """Returns whether the input string is a valid (5 digit) zip code"""
pass
# Check your answer
q1.check()
#%%RM_IF(PROD)%%
def is_valid_zip(zip_code):
    """Returns whether the input string is a valid (5 digit) zip code"""
return len(zip_code) == 5 and zip_code.isdigit()
q1.assert_check_passed()
#%%RM_IF(PROD)%%
def is_valid_zip(zip_code):
    """Returns whether the input string is a valid (5 digit) zip code"""
return len(zip_code) == 5
q1.assert_check_failed()
#_COMMENT_IF(PROD)_
q1.hint()
#_COMMENT_IF(PROD)_
q1.solution()
def word_search(doc_list, keyword):
    """
    Takes a list of documents (each document is a string) and a keyword.
    Returns list of the index values into the original list for all documents
    containing the keyword.

    Example:
    >>> doc_list = ["The Learn Python Challenge Casino.", "They bought a car", "Casinoville"]
    >>> word_search(doc_list, 'casino')
    [0]
    """
pass
# Check your answer
q2.check()
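#%%RM_IF(PROD)%%
# A sketch solution (assumption: case-insensitive matching on tokens with any
# trailing '.,' stripped, as the docstring example suggests).
def word_search(doc_list, keyword):
    indices = []
    for i, doc in enumerate(doc_list):
        tokens = [t.rstrip('.,').lower() for t in doc.split()]
        if keyword.lower() in tokens:
            indices.append(i)
    return indices

q2.assert_check_passed()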
#_COMMENT_IF(PROD)_
q2.hint()
#_COMMENT_IF(PROD)_
q2.solution()
def multi_word_search(doc_list, keywords):
    """
    Takes list of documents (each document is a string) and a list of keywords.
    Returns a dictionary where each key is a keyword, and the value is a list of indices
    (from doc_list) of the documents containing that keyword.

    >>> doc_list = ["The Learn Python Challenge Casino.", "They bought a car and a casino", "Casinoville"]
    >>> keywords = ['casino', 'they']
    >>> multi_word_search(doc_list, keywords)
    {'casino': [0, 1], 'they': [1]}
    """
pass
# Check your answer
q3.check()
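#%%RM_IF(PROD)%%
# A sketch solution (assumption: simply reuses the word_search sketch above).
def multi_word_search(doc_list, keywords):
    return {keyword: word_search(doc_list, keyword) for keyword in keywords}

q3.assert_check_passed()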
#_COMMENT_IF(PROD)_
q3.solution()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1 - Translating human readable dates into machine readable dates
Step2: You've loaded
Step3: You now have
Step4: 2 - Neural machine translation with attention
Step6: Now you can use these layers to implement one_step_attention(). In order to propagate a Keras tensor object X through one of these layers, use layer(X) (or layer([X,Y]) if it requires multiple inputs.), e.g. densor(X) will propagate X through the Dense(1) layer defined above.
Step7: You will be able to check the expected output of one_step_attention() after you've coded the model() function.
Step9: Now you can use these layers $T_y$ times in a for loop to generate the outputs, and their parameters will not be reinitialized. You will have to carry out the following steps
Step10: Run the following cell to create your model.
Step11: Let's get a summary of the model to check if it matches the expected output.
Step12: Expected Output
Step13: The last step is to define all your inputs and outputs to fit the model
Step14: Let's now fit the model and run it for one epoch.
Step15: While training you can see the loss as well as the accuracy on each of the 10 positions of the output. The table below gives you an example of what the accuracies could be if the batch had 2 examples
Step16: You can now see the results on new examples.
Step17: You can also change these examples to test with your own examples. The next part will give you a better sense on what the attention mechanism is doing--i.e., what part of the input the network is paying attention to when generating a particular output character.
Step18: Navigate through the output of model.summary() above. You can see that the layer named attention_weights outputs the alphas of shape (m, 30, 1) before dot_2 computes the context vector for every time step $t = 0, \ldots, T_y-1$. Let's get the activations from this layer.
|
<ASSISTANT_TASK:>
Python Code:
from keras.layers import Bidirectional, Concatenate, Permute, Dot, Input, LSTM, Multiply
from keras.layers import RepeatVector, Dense, Activation, Lambda
from keras.optimizers import Adam
from keras.utils import to_categorical
from keras.models import load_model, Model
import keras.backend as K
import numpy as np
from faker import Faker
import random
from tqdm import tqdm
from babel.dates import format_date
from nmt_utils import *
import matplotlib.pyplot as plt
%matplotlib inline
m = 10000
dataset, human_vocab, machine_vocab, inv_machine_vocab = load_dataset(m)
dataset[:10]
Tx = 30
Ty = 10
X, Y, Xoh, Yoh = preprocess_data(dataset, human_vocab, machine_vocab, Tx, Ty)
print("X.shape:", X.shape)
print("Y.shape:", Y.shape)
print("Xoh.shape:", Xoh.shape)
print("Yoh.shape:", Yoh.shape)
index = 0
print("Source date:", dataset[index][0])
print("Target date:", dataset[index][1])
print()
print("Source after preprocessing (indices):", X[index])
print("Target after preprocessing (indices):", Y[index])
print()
print("Source after preprocessing (one-hot):", Xoh[index])
print("Target after preprocessing (one-hot):", Yoh[index])
# Defined shared layers as global variables
repeator = RepeatVector(Tx)
concatenator = Concatenate(axis=-1)
densor = Dense(1, activation = "relu")
activator = Activation(softmax, name='attention_weights') # We are using a custom softmax(axis = 1) loaded in this notebook
dotor = Dot(axes = 1)
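# Illustration (not graded code): how these shared layers act on Keras tensors,
# assuming a has shape (m, Tx, 2*n_a) and s_prev has shape (m, n_s):
#   repeator(s_prev)         -> (m, Tx, n_s)
#   concatenator([a, s_rep]) -> (m, Tx, 2*n_a + n_s)
#   densor(concat)           -> (m, Tx, 1)      the "energies" e
#   activator(e)             -> (m, Tx, 1)      softmax over the Tx axis
#   dotor([alphas, a])       -> (m, 1, 2*n_a)   the context vector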
# GRADED FUNCTION: one_step_attention
def one_step_attention(a, s_prev):
    """
    Performs one step of attention: Outputs a context vector computed as a dot product of the attention weights
    "alphas" and the hidden states "a" of the Bi-LSTM.

    Arguments:
    a -- hidden state output of the Bi-LSTM, numpy-array of shape (m, Tx, 2*n_a)
    s_prev -- previous hidden state of the (post-attention) LSTM, numpy-array of shape (m, n_s)

    Returns:
    context -- context vector, input of the next (post-attention) LSTM cell
    """
### START CODE HERE ###
# Use repeator to repeat s_prev to be of shape (m, Tx, n_s) so that you can concatenate it with all hidden states "a" (≈ 1 line)
s_prev = None
# Use concatenator to concatenate a and s_prev on the last axis (≈ 1 line)
concat = None
# Use densor to propagate concat through a small fully-connected neural network to compute the "energies" variable e. (≈1 lines)
e = None
# Use activator and e to compute the attention weights "alphas" (≈ 1 line)
alphas = None
# Use dotor together with "alphas" and "a" to compute the context vector to be given to the next (post-attention) LSTM-cell (≈ 1 line)
context = None
### END CODE HERE ###
return context
n_a = 64
n_s = 128
post_activation_LSTM_cell = LSTM(n_s, return_state = True)
output_layer = Dense(len(machine_vocab), activation=softmax)
# GRADED FUNCTION: model
def model(Tx, Ty, n_a, n_s, human_vocab_size, machine_vocab_size):
    """
    Arguments:
    Tx -- length of the input sequence
    Ty -- length of the output sequence
    n_a -- hidden state size of the Bi-LSTM
    n_s -- hidden state size of the post-attention LSTM
    human_vocab_size -- size of the python dictionary "human_vocab"
    machine_vocab_size -- size of the python dictionary "machine_vocab"

    Returns:
    model -- Keras model instance
    """
# Define the inputs of your model with a shape (Tx,)
# Define s0 and c0, initial hidden state for the decoder LSTM of shape (n_s,)
X = Input(shape=(Tx, human_vocab_size))
s0 = Input(shape=(n_s,), name='s0')
c0 = Input(shape=(n_s,), name='c0')
s = s0
c = c0
# Initialize empty list of outputs
outputs = []
### START CODE HERE ###
# Step 1: Define your pre-attention Bi-LSTM. Remember to use return_sequences=True. (≈ 1 line)
a = None
# Step 2: Iterate for Ty steps
for t in range(None):
# Step 2.A: Perform one step of the attention mechanism to get back the context vector at step t (≈ 1 line)
context = None
# Step 2.B: Apply the post-attention LSTM cell to the "context" vector.
# Don't forget to pass: initial_state = [hidden state, cell state] (≈ 1 line)
s, _, c = None
# Step 2.C: Apply Dense layer to the hidden state output of the post-attention LSTM (≈ 1 line)
out = None
# Step 2.D: Append "out" to the "outputs" list (≈ 1 line)
None
# Step 3: Create model instance taking three inputs and returning the list of outputs. (≈ 1 line)
model = None
### END CODE HERE ###
return model
model = model(Tx, Ty, n_a, n_s, len(human_vocab), len(machine_vocab))
model.summary()
### START CODE HERE ### (≈2 lines)
opt = None
None
### END CODE HERE ###
s0 = np.zeros((m, n_s))
c0 = np.zeros((m, n_s))
outputs = list(Yoh.swapaxes(0,1))
model.fit([Xoh, s0, c0], outputs, epochs=1, batch_size=100)
model.load_weights('models/model.h5')
EXAMPLES = ['3 May 1979', '5 April 09', '21th of August 2016', 'Tue 10 Jul 2007', 'Saturday May 9 2018', 'March 3 2001', 'March 3rd 2001', '1 March 2001']
for example in EXAMPLES:
source = string_to_int(example, Tx, human_vocab)
source = np.array(list(map(lambda x: to_categorical(x, num_classes=len(human_vocab)), source))).swapaxes(0,1)
prediction = model.predict([source, s0, c0])
prediction = np.argmax(prediction, axis = -1)
output = [inv_machine_vocab[int(i)] for i in prediction]
print("source:", example)
print("output:", ''.join(output))
model.summary()
attention_map = plot_attention_map(model, human_vocab, inv_machine_vocab, "Tuesday April 08 1993", num = 6, n_s = 128)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Logistic Regression
Step2: Our task is to predict the household column using the al column. Let's visualize the relationship between the input and output and also train the logistic regression to see the outcome that it produces.
Step3: As we can see, logistic regression can output the probabilities of an observation belonging to a specific class, and these probabilities can be converted into class predictions by choosing a cutoff value (e.g. a probability higher than 0.5 is classified as class 1).
Step5: The logistic function has some nice properties. The y-value represents the probability and it is always bounded between 0 and 1, which is want we wanted for probabilities. For an x value of 0 you get a 0.5 probability. Also as you get more positive x value you get a higher probability, on the other hand, a more negative x value results in a lower probability.
Step6: Interpreting the Intercept
Step7: Interpretation
Step9: Defining The Cost Function
Step12: Note
Step13: Comparing Result and Convergence Behavior
|
<ASSISTANT_TASK:>
Python Code:
# code for loading the format for the notebook
import os
# path : store the current path to convert back to it later
path = os.getcwd()
os.chdir(os.path.join('..', 'notebook_format'))
from formats import load_style
load_style(plot_style = False)
os.chdir(path)
# 1. magic for inline plot
# 2. magic to print version
# 3. magic so that the notebook will reload external python modules
# 4. magic to enable retina (high resolution) plots
# https://gist.github.com/minrk/3301035
%matplotlib inline
%load_ext watermark
%load_ext autoreload
%autoreload 2
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
%watermark -a 'Ethen' -d -t -v -p numpy,pandas,matplotlib,sklearn
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.data'
col_names = ['id', 'ri', 'na', 'mg', 'al', 'si', 'k', 'ca', 'ba', 'fe', 'glass_type']
glass = pd.read_csv(url, names = col_names, index_col = 'id')
glass.sort_values('al', inplace = True)
# convert the glass type into binary outcome
# types 1, 2, 3 are window glass
# types 5, 6, 7 are household glass
glass['household'] = glass['glass_type'].map({1: 0, 2: 0, 3: 0, 5: 1, 6: 1, 7: 1})
glass.head()
logreg = LogisticRegression(C = 1e9)
X = glass['al'].values.reshape(-1, 1) # sklearn doesn't accept 1d-array, convert it to 2d
y = np.array(glass['household'])
logreg.fit(X, y)
# predict the probability that each observation belongs to class 1
# The first column indicates the predicted probability of class 0,
# and the second column indicates the predicted probability of class 1
glass['household_pred_prob'] = logreg.predict_proba(X)[:, 1]
# plot the predicted probability (familiarize yourself with the S-shape)
# change default figure and font size
plt.rcParams['figure.figsize'] = 8, 6
plt.rcParams['font.size'] = 12
plt.scatter(glass['al'], glass['household'])
plt.plot(glass['al'], glass['household_pred_prob'])
plt.xlabel('al')
plt.ylabel('household')
plt.show()
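# Sketch (not in the original notebook): converting the predicted probabilities
# into hard class predictions with a 0.5 cutoff, as described above.
glass['household_pred_class'] = (glass['household_pred_prob'] >= 0.5).astype(int)
print(glass['household_pred_class'].value_counts())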
x_values = np.linspace(-5, 5, 100)
y_values = [1 / (1 + np.exp(-x)) for x in x_values]
plt.plot(x_values, y_values)
plt.title('Logistic Function')
plt.show()
def predict_probability(data, weights):
    """probability predicted by the logistic regression"""
score = np.dot(data, weights)
predictions = 1 / (1 + np.exp(-score))
return predictions
# compute predicted log-odds for al = 2 using the equation
# convert log-odds to odds
# convert odds to probability
logodds = logreg.intercept_ + logreg.coef_[0] * 2
odds = np.exp(logodds)
prob = odds / (1 + odds)
print(prob)
logreg.predict_proba([[2]])[:, 1]  # sklearn expects a 2d array of samples
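# Sketch (not in the original notebook): interpreting the intercept itself --
# for al = 0 the log-odds reduce to the intercept, so the predicted probability is:
prob_at_zero = 1 / (1 + np.exp(-logreg.intercept_[0]))
print(prob_at_zero)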
# examine the coefficient for al
print('al', logreg.coef_[0])
# increasing al by 1 (so that al now becomes 3)
# increases the log-odds by 4.18
logodds = logodds + logreg.coef_[0]
odds = np.exp(logodds)
prob = odds / (1 + odds)
print(prob)
logreg.predict_proba([[3]])[:, 1]
def compute_avg_log_likelihood(data, label, weights):
    """
    The function uses a simple check to prevent the overflow problem where
    numbers get too large to represent and are converted to inf. An example
    of overflow is provided below; when this problem occurs, simply use the
    original score (without taking the exponential):

    scores = np.array([-10000, 200, 300])
    logexp = np.log(1 + np.exp(-scores))
    logexp
    """
scores = np.dot(data, weights)
logexp = np.log(1 + np.exp(-scores))
# simple check to prevent overflow
mask = np.isinf(logexp)
logexp[mask] = -scores[mask]
log_likelihood = np.sum((label - 1) * scores - logexp) / data.shape[0]
return log_likelihood
# put the code together into one cell
def predict_probability(data, weights):
    """probability predicted by the logistic regression"""
score = np.dot(data, weights)
predictions = 1 / (1 + np.exp(-score))
return predictions
def compute_avg_log_likelihood(data, label, weights):
    """
    The function uses a simple check to prevent the overflow problem where
    numbers get too large to represent and are converted to inf. An example
    of overflow is provided below; when this problem occurs, simply use the
    original score (without taking the exponential):

    scores = np.array([-10000, 200, 300])
    logexp = np.log(1 + np.exp(-scores))
    logexp
    """
scores = np.dot(data, weights)
logexp = np.log(1 + np.exp(-scores))
# simple check to prevent overflow
mask = np.isinf(logexp)
logexp[mask] = -scores[mask]
log_likelihood = np.sum((label - 1) * scores - logexp) / data.shape[0]
return log_likelihood
def logistic_regression(data, label, step_size, batch_size, max_iter):
# weights of the model are initialized as zero
data_num = data.shape[0]
feature_num = data.shape[1]
weights = np.zeros(data.shape[1])
# `i` keeps track of the starting index of current batch
# and shuffle the data before starting
i = 0
permutation = np.random.permutation(data_num)
data, label = data[permutation], label[permutation]
# do a linear scan over data, for each iteration update the weight using
# batches of data, and store the log likelihood record to visualize convergence
log_likelihood_record = []
for _ in range(max_iter):
# extract the batched data and label use it to compute
# the predicted probability using the current weight and the errors
batch = slice(i, i + batch_size)
batch_data, batch_label = data[batch], label[batch]
predictions = predict_probability(batch_data, weights)
errors = batch_label - predictions
# loop over each coefficient to compute the derivative and update the weight
for j in range(feature_num):
derivative = np.dot(errors, batch_data[:, j])
weights[j] += step_size * derivative / batch_size
# track whether log likelihood is increasing after
# each weight update
log_likelihood = compute_avg_log_likelihood(
data = batch_data,
label = batch_label,
weights = weights
)
log_likelihood_record.append(log_likelihood)
# update starting index of for the batches
# and if we made a complete pass over data, shuffle again
# and refresh the index that keeps track of the batch
i += batch_size
if i + batch_size > data_num:
permutation = np.random.permutation(data_num)
data, label = data[permutation], label[permutation]
i = 0
# We return the list of log likelihoods for plotting purposes.
return weights, log_likelihood_record
# manually append the coefficient term,
# every good open-source library does not
# require this additional step from the user
data = np.c_[np.ones(X.shape[0]), X]
# using our logistic regression code
weights_batch, log_likelihood_batch = logistic_regression(
data = data,
label = np.array(y),
step_size = 5e-1,
batch_size = X.shape[0], # batch gradient descent
max_iter = 200
)
# compare both logistic regression's auc score
logreg = LogisticRegression(C = 1e9)
logreg.fit(X, y)
pred_prob = logreg.predict_proba(X)[:, 1]
proba = predict_probability(data, weights_batch)
# check that the auc score is similar
auc1 = metrics.roc_auc_score(y, pred_prob)
auc2 = metrics.roc_auc_score(y, proba)
print('auc', auc1, auc2)
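# Sketch (illustration only): beyond matching AUC, the fitted parameters should
# be close too -- weights_batch[0] plays the role of the intercept and
# weights_batch[1] the coefficient for al.
print(weights_batch)
print(logreg.intercept_, logreg.coef_)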
weights_sgd, log_likelihood_sgd = logistic_regression(
data = data,
label = y,
step_size = 5e-1,
batch_size = 30, # stochastic gradient descent
max_iter = 200
)
weights_minibatch, log_likelihood_minibatch = logistic_regression(
data = data,
label = y,
step_size = 5e-1,
batch_size = 100, # mini-batch gradient descent
max_iter = 200
)
plt.figure(figsize = (10, 7))
plt.plot(log_likelihood_sgd, label = 'stochastic gradient descent')
plt.plot(log_likelihood_batch, label = 'batch gradient descent')
plt.plot(log_likelihood_minibatch, label = 'mini-batch gradient descent')
plt.legend(loc = 'best')
plt.xlabel('# of iterations')
plt.ylabel('Average log likelihood')
plt.title('Convergence Plot')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Post-training integer quantization
Step2: Train and export the model
Step3: This training won't take long because you're training the model for just a single epoch, which trains to about 96% accuracy.
Step4: Write it out to a .tflite file
Step5: Now you have a trained MNIST model that's converted to a .tflite file, but it's still using 32-bit float values for all parameter data.
Step6: Now, in order to create quantized values with an accurate dynamic range of activations, you need to provide a representative dataset
Step7: Finally, convert the model to TensorFlow Lite format
Step8: Note how the resulting file is approximately 1/4 the size
Step9: Your model should now be fully quantized. However, if you convert a model that includes any operations that TensorFlow Lite cannot quantize, those ops are left in floating point. This allows for conversion to complete so you have a smaller and more efficient model, but the model won't be compatible with some ML accelerators that require full integer quantization. Also, by default, the converted model still use float input and outputs, which also is not compatible with some accelerators.
Step10: In this example, the resulting model size remains the same because all operations successfully quantized to begin with. However, this new model now uses quantized input and output, making it compatible with more accelerators, such as the Coral Edge TPU.
Step11: Test the models on one image
Step12: Now test the quantized model (using the uint8 data)
Step13: Evaluate the models
Step14: Repeat the evaluation on the fully quantized model using the uint8 data
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
logging.getLogger("tensorflow").setLevel(logging.DEBUG)
try:
# %tensorflow_version only exists in Colab.
import tensorflow.compat.v2 as tf
except Exception:
pass
tf.enable_v2_behavior()
from tensorflow import keras
import numpy as np
import pathlib
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define the model architecture
model = keras.Sequential([
keras.layers.InputLayer(input_shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
# Train the digit classification model
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
epochs=1,
validation_data=(test_images, test_labels)
)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir/"mnist_model.tflite"
tflite_model_file.write_bytes(tflite_model)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
mnist_train, _ = tf.keras.datasets.mnist.load_data()
images = tf.cast(mnist_train[0], tf.float32) / 255.0
mnist_ds = tf.data.Dataset.from_tensor_slices((images)).batch(1)
def representative_data_gen():
for input_value in mnist_ds.take(100):
yield [input_value]
converter.representative_dataset = representative_data_gen
tflite_model_quant = converter.convert()
tflite_model_quant_file = tflite_models_dir/"mnist_model_quant.tflite"
tflite_model_quant_file.write_bytes(tflite_model_quant)
!ls -lh {tflite_models_dir}
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model_quant = converter.convert()
tflite_model_quant_file = tflite_models_dir/"mnist_model_quant_io.tflite"
tflite_model_quant_file.write_bytes(tflite_model_quant)
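# Sketch (not part of the original guide): confirming the fully quantized model
# now expects uint8 inputs and produces uint8 outputs.
check = tf.lite.Interpreter(model_path=str(tflite_model_quant_file))
check.allocate_tensors()
print(check.get_input_details()[0]['dtype'], check.get_output_details()[0]['dtype'])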
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
interpreter_quant = tf.lite.Interpreter(model_path=str(tflite_model_quant_file))
interpreter_quant.allocate_tensors()
input_index_quant = interpreter_quant.get_input_details()[0]["index"]
output_index_quant = interpreter_quant.get_output_details()[0]["index"]
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
interpreter.set_tensor(input_index, test_image)
interpreter.invoke()
predictions = interpreter.get_tensor(output_index)
import matplotlib.pylab as plt
plt.imshow(test_images[0])
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(test_labels[0]),
predict=str(np.argmax(predictions[0]))))
plt.grid(False)
input_index = interpreter_quant.get_input_details()[0]["index"]
output_index = interpreter_quant.get_output_details()[0]["index"]
interpreter_quant.set_tensor(input_index, test_image)
interpreter_quant.invoke()
predictions = interpreter_quant.get_tensor(output_index)
plt.imshow(test_images[0])
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(test_labels[0]),
predict=str(np.argmax(predictions[0]))))
plt.grid(False)
# A helper function to evaluate the TF Lite model using "test" dataset.
def evaluate_model(interpreter):
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
# Run predictions on every image in the "test" dataset.
prediction_digits = []
for test_image in test_images:
# Pre-processing: add batch dimension and convert to float32 to match with
# the model's input data format.
test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
interpreter.set_tensor(input_index, test_image)
# Run inference.
interpreter.invoke()
# Post-processing: remove batch dimension and find the digit with highest
# probability.
output = interpreter.tensor(output_index)
digit = np.argmax(output()[0])
prediction_digits.append(digit)
# Compare prediction results with ground truth labels to calculate accuracy.
accurate_count = 0
for index in range(len(prediction_digits)):
if prediction_digits[index] == test_labels[index]:
accurate_count += 1
accuracy = accurate_count * 1.0 / len(prediction_digits)
return accuracy
print(evaluate_model(interpreter))
# NOTE: Colab runs on server CPUs, and TensorFlow Lite currently
# doesn't have super optimized server CPU kernels. So this part may be
# slower than the above float interpreter. But for mobile CPUs, considerable
# speedup can be observed.
print(evaluate_model(interpreter_quant))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we create a training set of size num_samples and num_features.
Step2: Next we run a performance test on the created data set. For this we train a random forest classifier multiple times and measure the training time. Each time we use a different number of jobs to train the classifier. We repeat the process on training sets of various sizes.
Step3: Finally we plot and evaluate our results.
Step4: The training time is inversely proportional to the number of used cpu cores.
|
<ASSISTANT_TASK:>
Python Code:
# imports
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
import time
import matplotlib.pyplot as plt
import seaborn as sns
num_samples = 500 * 1000
num_features = 40
X, y = make_classification(n_samples=num_samples, n_features=num_features)
# test different number of cores: here max 8
max_cores = 8
num_cpu_list = list(range(1,max_cores + 1))
max_sample_list = [int(l * num_samples) for l in [0.1, 0.2, 1, 0.001]]
training_times_all = []
# the default setting for classifier
clf = RandomForestClassifier()
for max_sample in max_sample_list:
training_times = []
for num_cpu in num_cpu_list:
# change number of cores
clf.set_params(n_jobs=num_cpu)
# start_time = time.time()
# train classifier on training data
tr = %timeit -o clf.fit(X[:max_sample+1], y[:max_sample+1])
# save the runtime to the list
training_times.append(tr.best)
# print logging message
print("Computing for {} samples and {} cores DONE.".format(max_sample,num_cpu))
training_times_all.append(training_times)
print("All computations DONE.")
plt.plot(num_cpu_list, training_times_all[0], 'ro', label="{}k".format(max_sample_list[0]//1000))
plt.plot(num_cpu_list, training_times_all[1], "bs" , label="{}k".format(max_sample_list[1]//1000))
plt.plot(num_cpu_list, training_times_all[2], "g^" , label="{}k".format(max_sample_list[2]//1000))
plt.axis([0, len(num_cpu_list)+1, 0, max(training_times_all[2])+1])
plt.title("Training time vs #CPU Cores")
plt.xlabel("#CPU Cores")
plt.ylabel("training time [s]")
plt.legend()
plt.show()
plt.plot(num_cpu_list, training_times_all[3], 'ro', label="{}k".format(max_sample_list[3]/1000))
plt.axis([0, len(num_cpu_list)+1, 0, max(training_times_all[3])+1])
plt.title("Training time vs #CPU Cores on small dataset")
plt.xlabel("#CPU Cores")
plt.ylabel("training time [s]")
plt.legend()
plt.show()
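# Sketch (not part of the original experiment): the speedup relative to a single
# core on the largest training set, to check the inverse-proportionality claim.
baseline = training_times_all[2][0]
print([(n, round(baseline / t, 2)) for n, t in zip(num_cpu_list, training_times_all[2])])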
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What the code above does is bind the name np to all the tools in the numpy library. We can now call numpy functions as np.<numpy_fun>. The name np is optional; you can change it, but you will need that name to access numpy functions as <new_name>.<numpy_fun>. Another option is to just include import numpy, in which case functions are called as numpy.<numpy_fun>. To learn more about the module system you can check the link https
Step2: We are going to build an Array class that covers both matrices and vectors. From a computational point of view, a vector is a one-column matrix. In class we saw that it is convenient to think of matrices as transformations of vectors; computationally, however, since the addition and multiplication rules are similar, it is convenient to think of both as arrays, which is the traditional name in programming
Step3: Matrices can be added and multiplied by scalars
Step4: numpy matrices can be multiplied with the matmul function inside numpy
Step5: numpy arrays can be accessed with indices and slices
Step6: A whole row
Step7: A whole column
Step8: A sub-block (note that a slice n
Step9: In numpy we can get the dimensions of an array with numpy's shape field
Step10: numpy is smart about handling plain lists as vectors
Step11: Starting from scratch...
Step12: The data field of an Array stores the array's list of lists. We need to implement some methods for it to work as a linear algebra class.
Step13: Why these differences? Python secretly looks for a method called __repr__ when an object is evaluated without explicit printing, and __str__ when it is printed explicitly with print. For example
Step14: Exercises
Step15: Testing the classes
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
x = [1,2,3]
y = [4,5,6]
x + y
B = np.array([[1,2,3], [4,5,6]])  # having run import numpy as np
B + 2*B  # Python knows how to add and scale arrays as linear algebra
np.matmul(B.transpose(), B) # B^t*B
B[1,1]
B[1,:]
B[:,2]
B[0:2,0:2]
B.shape
vec = np.array([1,2,3])
print(vec)
class Array:
"Una clase minima para algebra lineal"
def __init__(self, list_of_rows):
"Constructor"
self.data = list_of_rows
self.shape = (len(list_of_rows), len(list_of_rows[0]))
A = Array([[1,2,3], [4,5,6]])
A.__dict__ # el campo escondido __dict__ permite acceder a las propiedades de clase de un objeto
A.data
A.shape
Array([[1,2,3], [4,5,6]])
print(Array([[1,2,3], [4,5,6]]))
np.array([[1,2,3], [4,5,6]])
print(np.array([[1,2,3], [4,5,6]]))
class TestClass:
def __init__(self):
pass # this means do nothing in Python
def say_hi(self):
print("Hey, I am just a normal method saying hi!")
def __repr__(self):
return "I am the special class method REPRESENTING a TestClass without printing"
def __str__(self):
return "I am the special class method for explicitly PRINTING a TestClass object"
x = TestClass()
x.say_hi()
x
print(x)
class Array:
    "A minimal class for linear algebra"
    def __init__(self, list_of_rows):
        "Constructor and validator"
        # get the dimensions
        self.data = list_of_rows
        nrow = len(list_of_rows)
        # vector case: reshape it correctly
        if not isinstance(list_of_rows[0], list):
            nrow = 1
            self.data = [[x] for x in list_of_rows]
        # now the columns should be right even if it is a vector
        ncol = len(self.data[0])
        self.shape = (nrow, ncol)
        # validate that every row has the right size
        if any([len(r) != ncol for r in self.data]):
            raise Exception("All rows must have the same length")
    # Exercise 1
def __repr__(self):
str2print = "Array"
for i in range(len(self.data)):
if(i==0):
str2print += str(self.data[i]) + "\n"
if(i>0):
str2print += " " + str(self.data[i]) + "\n"
return str2print
def __str__(self):
str2print = ""
for i in range(len(self.data)):
str2print += str(self.data[i]) + "\n"
return str2print
    # Exercise 2
def __getitem__(self, idx):
return self.data[idx[0]][idx[1]]
def __setitem__(self, idx, valor):
self.data[idx[0]][idx[1]] = valor
    # Exercise 3
def zeros(x, y):
array_de_ceros = Array([[0 for col in range(y)] for row in range(x)])
return array_de_ceros
def eye(x):
array_eye = Array([[0 for col in range(x)] for row in range(x)])
for i in range(x):
for j in range(x):
if i == j:
array_eye[i,j] = 1
return array_eye
    # Exercise 4
    def transpose(self):
        # get the dimensions
        num_row = len(self.data)
        num_col = len(self.data[0])
        # create the receiving matrix
        mat_transpuesta = Array([[0 for col in range(num_row)] for row in range(num_col)])
        # transpose
        for i in range(num_row):
            for j in range(num_col):
                mat_transpuesta[j, i] = self.data[i][j]
        return mat_transpuesta
    def __add__(self, other):
        "Time to add"
        if isinstance(other, Array):
            if self.shape != other.shape:
                raise Exception("The dimensions differ!")
            rows, cols = self.shape
            newArray = Array([[0. for c in range(cols)] for r in range(rows)])
            for r in range(rows):
                for c in range(cols):
                    newArray.data[r][c] = self.data[r][c] + other.data[r][c]
            return newArray
        elif isinstance(other, (int, float, complex)):  # in case the right-hand side is just a number
            rows, cols = self.shape
            newArray = Array([[0. for c in range(cols)] for r in range(rows)])
            for r in range(rows):
                for c in range(cols):
                    newArray.data[r][c] = self.data[r][c] + other
            return newArray
        else:
            return NotImplemented  # the sentinel Python expects from these special methods
    # Exercise 5
    # (could not get this one to work :( )
    # Exercise 6
    def __mul__(self, other):
        if isinstance(other, Array):
            # validate the dimensions
            if self.shape[1] != other.shape[0]:
                raise Exception("The matrices are not compatible!")
            # get the dimensions
            num_rowsA = self.shape[0]
            num_rowsB = other.shape[0]
            num_colsB = other.shape[1]
            # create the receiving matrix
            newArray = Array([[0 for col in range(num_colsB)] for row in range(num_rowsA)])
            # multiply
            for i in range(num_rowsA):
                for j in range(num_colsB):
                    for k in range(num_rowsB):
                        newArray[i, j] = newArray[i, j] + self.data[i][k] * other.data[k][j]
            return newArray
        # matrix times a plain number
        elif isinstance(other, (int, float, complex)):
            # get the dimensions
            rows, cols = self.shape
            # create the receiving matrix
            newArray = Array([[0 for col in range(cols)] for row in range(rows)])
            # multiply
            for row in range(rows):
                for col in range(cols):
                    newArray.data[row][col] = self.data[row][col] * other
            return newArray
        else:
            return NotImplemented
def __rmul__(self, other):
if isinstance(other, (int, float, complex)):
rows, cols = self.shape
newArray = Array([[0 for col in range(cols)] for row in range(rows)])
for row in range(rows):
for col in range(cols):
newArray.data[row][col] = self.data[row][col] * other
return newArray
else:
return NotImplemented
X = Array([[1,2,3,4,5],[6,7,8,9,10],[11,12,13,14,15]])
X
print(X)
X[0,2]
X
X[0,0] = 10
X
Array.zeros(5,5)
Array.eye(4)
X.transpose()
B = Array.eye(5)
B
X*B
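# Sketch (not part of the exercises): cross-checking our Array multiplication
# against numpy on the same data; the results should agree entry by entry.
import numpy as np
print(np.matmul(np.array(X.data), np.array(B.data)))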
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the following cell, complete the code with an expression that evaluates to a list of integers derived from the raw numbers in numbers_str, assigning the value of this expression to a variable numbers. If you do everything correctly, executing the cell should produce the output 985 (not '985').
Step2: Great! We'll be using the numbers list you created above in the next few problems.
Step3: In the cell below, write an expression that evaluates to a list of the integers from numbers that are evenly divisible by three, sorted in numerical order. Expected output
Step4: Okay. You're doing great. Now, in the cell below, write an expression that evaluates to a list of the square roots of all the integers in numbers that are less than 100. In order to do this, you'll need to use the sqrt function from the math module, which I've already imported for you. Expected output
Step5: Problem set #2
Step6: Now, in the cell below, write a list comprehension that evaluates to a list of names of the planets that have a diameter greater than four earth radii. Expected output
Step7: In the cell below, write a single expression that evaluates to the sum of the mass of all planets in the solar system. Expected output
Step8: Good work. Last one with the planets. Write an expression that evaluates to the names of the planets that have the word giant anywhere in the value for their type key. Expected output
Step9: EXTREME BONUS ROUND
Step10: Problem set #3
Step11: In the cell above, I defined a variable poem_lines which has a list of lines in the poem, and imported the re library.
Step12: Good! Now, in the following cell, write a list comprehension that evaluates to a list of lines in the poem that end with a five-letter word, regardless of whether or not there is punctuation following the word at the end of the line. (Hint
Step13: Okay, now a slightly trickier one. In the cell below, I've created a string all_lines which evaluates to the entire text of the poem in one string. Execute this cell.
Step14: Now, write an expression that evaluates to all of the words in the poem that follow the word 'I'. (The strings in the resulting list should not include the I.) Hint
Step15: Finally, something super tricky. Here's a list of strings that contains a restaurant menu. Your job is to wrangle this plain text, slightly-structured data into a list of dictionaries.
Step16: You'll need to pull out the name of the dish and the price of the dish. The v after the hyphen indicates that the dish is vegetarian---you'll need to include that information in your dictionary as well. I've included the basic framework; you just need to fill in the contents of the for loop.
|
<ASSISTANT_TASK:>
Python Code:
numbers_str = '496,258,332,550,506,699,7,985,171,581,436,804,736,528,65,855,68,279,721,120'
numbers = [int(i) for i in numbers_str.split(',')] # replace 'None' with an expression, as described above
max(numbers)
sorted(numbers)[-10:]
sorted([i for i in numbers if i % 3 == 0])
from math import sqrt
[sqrt(i) for i in numbers if i < 100]
planets = [
{'diameter': 0.382,
'mass': 0.06,
'moons': 0,
'name': 'Mercury',
'orbital_period': 0.24,
'rings': 'no',
'type': 'terrestrial'},
{'diameter': 0.949,
'mass': 0.82,
'moons': 0,
'name': 'Venus',
'orbital_period': 0.62,
'rings': 'no',
'type': 'terrestrial'},
{'diameter': 1.00,
'mass': 1.00,
'moons': 1,
'name': 'Earth',
'orbital_period': 1.00,
'rings': 'no',
'type': 'terrestrial'},
{'diameter': 0.532,
'mass': 0.11,
'moons': 2,
'name': 'Mars',
'orbital_period': 1.88,
'rings': 'no',
'type': 'terrestrial'},
{'diameter': 11.209,
'mass': 317.8,
'moons': 67,
'name': 'Jupiter',
'orbital_period': 11.86,
'rings': 'yes',
'type': 'gas giant'},
{'diameter': 9.449,
'mass': 95.2,
'moons': 62,
'name': 'Saturn',
'orbital_period': 29.46,
'rings': 'yes',
'type': 'gas giant'},
{'diameter': 4.007,
'mass': 14.6,
'moons': 27,
'name': 'Uranus',
'orbital_period': 84.01,
'rings': 'yes',
'type': 'ice giant'},
{'diameter': 3.883,
'mass': 17.2,
'moons': 14,
'name': 'Neptune',
'orbital_period': 164.8,
'rings': 'yes',
'type': 'ice giant'}]
[i['name'] for i in planets if i['diameter'] > 4]
sum([i['mass'] for i in planets])
[i['name'] for i in planets if 'giant' in i['type']]
[i['name'] for i in sorted(planets, key=lambda planet: planet['moons'])]
import re
poem_lines = ['Two roads diverged in a yellow wood,',
'And sorry I could not travel both',
'And be one traveler, long I stood',
'And looked down one as far as I could',
'To where it bent in the undergrowth;',
'',
'Then took the other, as just as fair,',
'And having perhaps the better claim,',
'Because it was grassy and wanted wear;',
'Though as for that the passing there',
'Had worn them really about the same,',
'',
'And both that morning equally lay',
'In leaves no step had trodden black.',
'Oh, I kept the first for another day!',
'Yet knowing how way leads on to way,',
'I doubted if I should ever come back.',
'',
'I shall be telling this with a sigh',
'Somewhere ages and ages hence:',
'Two roads diverged in a wood, and I---',
'I took the one less travelled by,',
'And that has made all the difference.']
[line for line in poem_lines if re.search(r'\b\w{4}\s\w{4}\b', line)]
# TA-COMMENT: Could also use the ? quantifier instead of {0,1}; note that the \b
# word boundary is needed so the last five letters of a longer word don't match,
# and \W* allows any trailing punctuation, not just a period.
[line for line in poem_lines if re.search(r'\b\w{5}\W*$', line)]
all_lines = " ".join(poem_lines)
re.findall(r'\bI\s(\w+)', all_lines)  # \b keeps a capital I inside a word from matching
entrees = [
"Yam, Rosemary and Chicken Bowl with Hot Sauce $10.95",
"Lavender and Pepperoni Sandwich $8.49",
"Water Chestnuts and Peas Power Lunch (with mayonnaise) $12.95 - v",
"Artichoke, Mustard Green and Arugula with Sesame Oil over noodles $9.95 - v",
"Flank Steak with Lentils And Tabasco Pepper With Sweet Chilli Sauce $19.95",
"Rutabaga And Cucumber Wrap $8.49 - v"
]
menu = []
for item in entrees:
dictitem = {}
    dictitem['name'] = re.search(r'(.*)\s\$', item).group(1)  # group(1) is the first capture group; group(0) is the whole match
dictitem['price'] = float(re.search('\d{1,2}\.\d{2}', item).group())
dictitem['vegetarian'] = bool(re.match('.*v$', item))
menu.append(dictitem)
menu
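# Sketch (not part of the assignment): a quick sanity check on the parsed menu,
# e.g. the average price of the vegetarian dishes.
veg_prices = [d['price'] for d in menu if d['vegetarian']]
print(sum(veg_prices) / len(veg_prices))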
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: MarkerCluster
Step2: Terminator
Step3: Leaflet.boatmarker
Step4: Fullscreen
|
<ASSISTANT_TASK:>
Python Code:
import os

import folium
from folium import plugins
m = folium.Map([45, 3], zoom_start=4)
plugins.ScrollZoomToggler().add_to(m)
m.save(os.path.join('results', 'Plugins_0.html'))
m
import numpy as np
N = 100
data = np.array(
[
np.random.uniform(low=35, high=60, size=N), # Random latitudes in Europe.
np.random.uniform(low=-12, high=30, size=N), # Random longitudes in Europe.
range(N), # Popups texts are simple numbers.
]
).T
m = folium.Map([45, 3], zoom_start=4)
plugins.MarkerCluster(data).add_to(m)
m.save(os.path.join('results', 'Plugins_1.html'))
m
m = folium.Map([45, 3], zoom_start=1)
plugins.Terminator().add_to(m)
m.save(os.path.join('results', 'Plugins_2.html'))
m
m = folium.Map([30, 0], zoom_start=3)
plugins.BoatMarker(
location=(34, -43),
heading=45,
wind_heading=150,
wind_speed=45,
color='#8f8'
).add_to(m)
plugins.BoatMarker(
location=(46, -30),
heading=-20,
wind_heading=46,
wind_speed=25,
color='#88f'
).add_to(m)
m.save(os.path.join('results', 'Plugins_3.html'))
m
m = folium.Map(location=[41.9, -97.3], zoom_start=4)
plugins.Fullscreen(
position='topright',
title='Expand me',
titleCancel='Exit me',
forceSeparateButton=True).add_to(m)
m.save(os.path.join('results', 'Plugins_4.html'))
m # Click on the top right button.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TV Script Generation
Step3: Explore the Data
Step6: Implement Preprocessing Functions
Step9: Tokenize Punctuation
Step11: Preprocess all the data and save it
Step13: Check Point
Step15: Build the Neural Network
Step18: Input
Step21: Build RNN Cell and Initialize
Step24: Word Embedding
Step27: Build RNN
Step30: Build the Neural Network
Step33: Batches
Step35: Neural Network Training
Step37: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Implement Generate Functions
Step49: Choose Word
Step51: Generate TV Script
|
<ASSISTANT_TASK:>
Python Code:
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
view_sentence_range = (0, 10)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
# TODO: Implement Function
vocab = set(text) #get the set of all words in the vocabulary
#print(len(vocab))
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
#print(vocab_to_int)
#print(int_to_vocab)
return vocab_to_int, int_to_vocab
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_create_lookup_tables(create_lookup_tables)
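# Sketch (illustration, not graded): the two lookup tables are inverses, so a
# word -> id -> word round trip returns the original word.
v2i, i2v = create_lookup_tables('the quick brown fox jumps over the lazy dog'.split())
assert all(i2v[v2i[w]] == w for w in v2i)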
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
# TODO: Implement Function
return {
".": "||Period||",
",": "||Comma||",
'"': "||Quotation_Mark||",
";": "||Semicolon||",
"!": "||Exclamation_Mark||",
"?": "||Question_Mark||",
"(": "||Left_Parenthesis||",
")": "||Right_Parenthesis||",
"--": "||Dash||",
"\n": "||Return||"
}
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_tokenize(token_lookup)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
"""DON'T MODIFY ANYTHING IN THIS CELL"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
# TODO: Implement Function
input_ = tf.placeholder(tf.int32, [None, None], name='input')
targets_ = tf.placeholder(tf.int32, [None, None], name='targets')
learning_rate = tf.placeholder(tf.float32, name="learning_rate")
return input_, targets_, learning_rate
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_get_inputs(get_inputs)
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
# TODO: Implement Function
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
cell = tf.contrib.rnn.MultiRNNCell([lstm] * 2)
initial_state = cell.zero_state(batch_size, tf.float32)
initial_state = tf.identity(initial_state, name="initial_state")
return cell, initial_state
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
# TODO: Implement Function
#print("vocab_size: {0}".format(vocab_size))
#print("embed_dim: {0}".format(embed_dim))
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
#print(embedding)
embed = tf.nn.embedding_lookup(embedding, input_data)
#print(input_data)
#print(embed)
return embed
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
# TODO: Implement Function
#print("cell: {0}".format(cell))
#print("inputs: {0}".format(inputs))
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
#print("outputs: {0}".format(outputs))
#print("final_state: {0}".format(final_state))
final_state = tf.identity(final_state, name="final_state")
return outputs, final_state
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_rnn(build_rnn)
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
# TODO: Implement Function
#print("input_data: {0}".format(input_data))
#embed = get_embed(input_data, vocab_size, embed_dim)
embed = get_embed(input_data, vocab_size, rnn_size) #each rnn_cell??
#print("embed_data: {0}".format(embed))
#print("start")
output, final_state = build_rnn(cell, embed)
    # Project the RNN outputs onto the vocabulary with a linear (no-activation) layer.
    logits = tf.contrib.layers.fully_connected(output, vocab_size, activation_fn=None)
return logits, final_state
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array
    """
    # number of words consumed by one batch
    words_per_batch = batch_size * seq_length
    # number of full batches the text can supply
    n_batches = len(int_text) // words_per_batch
    # truncate the tail so the data divides evenly into batches
    int_text = int_text[:n_batches * words_per_batch]
# get the inputs batch
x = np.array(int_text)
# shift to the right for the targets, wrapping around to the beginning using list concatenation
y = np.array(int_text[1:] + [int_text[0]])
# split inputs into batches
x_batches = np.split(x.reshape(batch_size, -1), n_batches, 1)
# split targets into batches
y_batches = np.split(y.reshape(batch_size, -1), n_batches, 1)
#zip the x_batches and y_batches to initialize the new array
return np.array(list(zip(x_batches, y_batches)))
#my own little test
print(get_batches([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20], 3, 2))
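# For the toy call above the result has shape (3, 2, 3, 2): three batches, each an
# (inputs, targets) pair of shape (batch_size=3, seq_length=2). The first batch is
#   inputs  [[ 1  2] [ 7  8] [13 14]]   targets [[ 2  3] [ 8  9] [14 15]]
# and the last target wraps around to the first word: [[ 6  7] [12 13] [18  1]].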
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_batches(get_batches)
# Number of Epochs
num_epochs = 40
# Batch Size
batch_size = 256
# RNN Size
rnn_size = 256
# Embedding Dimension Size
embed_dim = 256
# Sequence Length
seq_length = 6
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 10
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
save_dir = './save'
# DON'T MODIFY ANYTHING IN THIS CELL
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
# DON'T MODIFY ANYTHING IN THIS CELL
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
# DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
# DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
# TODO: Implement Function
InputTensor = loaded_graph.get_tensor_by_name("input:0")
InitialStateTensor = loaded_graph.get_tensor_by_name("initial_state:0")
FinalStateTensor = loaded_graph.get_tensor_by_name("final_state:0")
ProbsTensor = loaded_graph.get_tensor_by_name("probs:0")
return InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_tensors(get_tensors)
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilities of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Greedy choice: always return the most likely word.
    return int_to_vocab[np.argmax(probabilities)]
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_pick_word(pick_word)
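# A hedged alternative to the greedy argmax above: sampling the next word from
# the predicted distribution usually yields less repetitive text. Illustrative
# only; the unit test above exercises pick_word, not this variant.
def pick_word_sampled(probabilities, int_to_vocab):
    probabilities = probabilities / np.sum(probabilities)  # guard against float drift
    idx = np.random.choice(len(probabilities), p=probabilities)
    return int_to_vocab[idx]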
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
        # probs has shape (1, seq_len, vocab); take the last time step of the single row
        pred_word = pick_word(probabilities[0][dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: After doing a pip install, you have to Reset Session so that the new packages are picked up. Please click the Reset Session button in the menu above.
Step2: <h2> 1. Environment variables for project and bucket </h2>
Step4: <h2> 2. Specifying query to pull the data </h2>
Step5: Try the query above in https
Step6: Run pipeline locally
Step7: Run pipeline on the cloud on a larger sample size.
Step8: Once the job completes, observe the files created in Google Cloud Storage
Step9: <h2> 4. Develop model with new inputs </h2>
Step10: Complete the TODOs in taxifare/trainer/model.py so that the code below works.
Step11: <h2> 5. Train on cloud </h2>
|
<ASSISTANT_TASK:>
Python Code:
%%bash
source activate py2env
conda install -y pytz
pip uninstall -y google-cloud-dataflow
pip install --upgrade apache-beam[gcp]==2.9.0
import tensorflow as tf
import apache_beam as beam
import shutil
print(tf.__version__)
import os
REGION = 'us-central1' # Choose an available region for Cloud MLE from https://cloud.google.com/ml-engine/docs/regions.
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME. Use a regional bucket in the region you selected.
PROJECT = 'cloud-training-demos' # CHANGE THIS
# for bash
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.8'
## ensure we're using python2 env
os.environ['CLOUDSDK_PYTHON'] = 'python2'
%%bash
## ensure gcloud is up to date
gcloud components update
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
## ensure we predict locally with our current Python environment
gcloud config set ml_engine/local_python `which python`
def create_query(phase, EVERY_N):
    """Return a BigQuery legacy-SQL query for the given phase.

    Rows are routed into EVERY_N hash buckets of pickup_datetime:
    buckets 0-1 feed train, bucket 2 valid and bucket 3 test, so the
    splits are disjoint and deterministic.
    """
    if EVERY_N is None:
        EVERY_N = 4  # default: hash the full dataset into 4 buckets
#select and pre-process fields
    base_query = """
SELECT
(tolls_amount + fare_amount) AS fare_amount,
DAYOFWEEK(pickup_datetime) AS dayofweek,
HOUR(pickup_datetime) AS hourofday,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers,
CONCAT(STRING(pickup_datetime), STRING(pickup_longitude), STRING(pickup_latitude), STRING(dropoff_latitude), STRING(dropoff_longitude)) AS key
FROM
[nyc-tlc:yellow.trips]
WHERE
trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
    AND passenger_count > 0
  """
#add subsampling criteria by modding with hashkey
if phase == 'train':
query = "{} AND ABS(HASH(pickup_datetime)) % {} < 2".format(base_query,EVERY_N)
elif phase == 'valid':
query = "{} AND ABS(HASH(pickup_datetime)) % {} == 2".format(base_query,EVERY_N)
elif phase == 'test':
query = "{} AND ABS(HASH(pickup_datetime)) % {} == 3".format(base_query,EVERY_N)
return query
print create_query('valid', 100) #example query using 1% of data
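# Hedged sanity check (illustrative): the phases must land in disjoint hash
# buckets so no row can appear in more than one split.
_train_q = create_query('train', 100)
_valid_q = create_query('valid', 100)
assert '% 100 < 2' in _train_q and '% 100 == 2' in _valid_q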
%%bash
gsutil -m rm -rf gs://$BUCKET/taxifare/ch4/taxi_preproc/
import datetime
####
# Arguments:
# -rowdict: Dictionary. The beam bigquery reader returns a PCollection in
# which each row is represented as a python dictionary
# Returns:
# -rowstring: a comma separated string representation of the record with dayofweek
# converted from int to string (e.g. 3 --> Tue)
####
def to_csv(rowdict):
days = ['null', 'Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
CSV_COLUMNS = 'fare_amount,dayofweek,hourofday,pickuplon,pickuplat,dropofflon,dropofflat,passengers,key'.split(',')
rowdict['dayofweek'] = days[rowdict['dayofweek']]
rowstring = ','.join([str(rowdict[k]) for k in CSV_COLUMNS])
return rowstring
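# Illustrative example (field values are hypothetical):
#   to_csv({'fare_amount': 12.0, 'dayofweek': 3, 'hourofday': 17,
#           'pickuplon': -73.99, 'pickuplat': 40.75, 'dropofflon': -73.98,
#           'dropofflat': 40.76, 'passengers': 2.0, 'key': 'k1'})
#   -> '12.0,Tue,17,-73.99,40.75,-73.98,40.76,2.0,k1'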
####
# Arguments:
# -EVERY_N: Integer. Sample one out of every N rows from the full dataset.
# Larger values will yield smaller sample
# -RUNNER: 'DirectRunner' or 'DataflowRunner'. Specfy to run the pipeline
# locally or on Google Cloud respectively.
# Side-effects:
# -Creates and executes dataflow pipeline.
# See https://beam.apache.org/documentation/programming-guide/#creating-a-pipeline
####
def preprocess(EVERY_N, RUNNER):
job_name = 'preprocess-taxifeatures' + '-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S')
print 'Launching Dataflow job {} ... hang on'.format(job_name)
OUTPUT_DIR = 'gs://{0}/taxifare/ch4/taxi_preproc/'.format(BUCKET)
#dictionary of pipeline options
options = {
'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'),
'temp_location': os.path.join(OUTPUT_DIR, 'tmp'),
        'job_name': job_name,  # reuse the name computed above
'project': PROJECT,
'runner': RUNNER
}
#instantiate PipelineOptions object using options dictionary
opts = beam.pipeline.PipelineOptions(flags=[], **options)
#instantantiate Pipeline object using PipelineOptions
with beam.Pipeline(options=opts) as p:
for phase in ['train', 'valid']:
query = create_query(phase, EVERY_N)
outfile = os.path.join(OUTPUT_DIR, '{}.csv'.format(phase))
            # Completed pipeline (one standard approach): read rows from BigQuery,
            # map each to a CSV string, and write sharded text files to GCS.
            (
                p | 'read_{}'.format(phase) >> beam.io.Read(beam.io.BigQuerySource(query=query))
                  | 'tocsv_{}'.format(phase) >> beam.Map(to_csv)
                  | 'write_{}'.format(phase) >> beam.io.WriteToText(outfile)
            )
print("Done")
preprocess(50*10000, 'DirectRunner')
preprocess(50*100, 'DataflowRunner')
#change first arg to None to preprocess full dataset
%%bash
gsutil ls -l gs://$BUCKET/taxifare/ch4/taxi_preproc/
%%bash
#print first 10 lines of first shard of train.csv
gsutil cat "gs://$BUCKET/taxifare/ch4/taxi_preproc/train.csv-00000-of-*" | head
%%bash
mkdir sample
gsutil cp "gs://$BUCKET/taxifare/ch4/taxi_preproc/train.csv-00000-of-*" sample/train.csv
gsutil cp "gs://$BUCKET/taxifare/ch4/taxi_preproc/valid.csv-00000-of-*" sample/valid.csv
%%bash
rm -rf taxifare.tar.gz taxi_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/taxifare
python -m trainer.task \
--train_data_paths=${PWD}/sample/train.csv \
--eval_data_paths=${PWD}/sample/valid.csv \
--output_dir=${PWD}/taxi_trained \
--train_steps=1000 \
--job-dir=/tmp
!ls taxi_trained/export/exporter/
%%writefile /tmp/test.json
{"dayofweek": "Sun", "hourofday": 17, "pickuplon": -73.885262, "pickuplat": 40.773008, "dropofflon": -73.987232, "dropofflat": 40.732403, "passengers": 2}
%%bash
model_dir=$(ls ${PWD}/taxi_trained/export/exporter)
gcloud ai-platform local predict \
--model-dir=${PWD}/taxi_trained/export/exporter/${model_dir} \
--json-instances=/tmp/test.json
#if gcloud ai-platform local predict fails, might need to update glcoud
#!gcloud --quiet components update
%%bash
OUTDIR=gs://${BUCKET}/taxifare/ch4/taxi_trained
JOBNAME=lab4a_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=${PWD}/taxifare/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC \
--runtime-version=$TFVERSION \
-- \
--train_data_paths="gs://$BUCKET/taxifare/ch4/taxi_preproc/train*" \
--eval_data_paths="gs://${BUCKET}/taxifare/ch4/taxi_preproc/valid*" \
--train_steps=5000 \
--output_dir=$OUTDIR
<END_TASK>
|